1 1.115 ozaki /* $NetBSD: if_vioif.c,v 1.115 2025/07/15 05:09:28 ozaki-r Exp $ */ 2 1.1 hannken 3 1.1 hannken /* 4 1.66 reinoud * Copyright (c) 2020 The NetBSD Foundation, Inc. 5 1.1 hannken * Copyright (c) 2010 Minoura Makoto. 6 1.1 hannken * All rights reserved. 7 1.1 hannken * 8 1.1 hannken * Redistribution and use in source and binary forms, with or without 9 1.1 hannken * modification, are permitted provided that the following conditions 10 1.1 hannken * are met: 11 1.1 hannken * 1. Redistributions of source code must retain the above copyright 12 1.1 hannken * notice, this list of conditions and the following disclaimer. 13 1.1 hannken * 2. Redistributions in binary form must reproduce the above copyright 14 1.1 hannken * notice, this list of conditions and the following disclaimer in the 15 1.1 hannken * documentation and/or other materials provided with the distribution. 16 1.1 hannken * 17 1.1 hannken * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 1.1 hannken * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 1.1 hannken * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 1.1 hannken * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 1.1 hannken * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 1.1 hannken * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 1.1 hannken * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 1.1 hannken * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 1.1 hannken * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 1.1 hannken * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 1.1 hannken */ 28 1.1 hannken 29 1.1 hannken #include <sys/cdefs.h> 30 1.115 ozaki __KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.115 2025/07/15 05:09:28 ozaki-r Exp $"); 31 1.15 ozaki 32 1.15 ozaki #ifdef _KERNEL_OPT 33 1.15 ozaki #include "opt_net_mpsafe.h" 34 1.15 ozaki #endif 35 1.1 hannken 36 1.1 hannken #include <sys/param.h> 37 1.1 hannken #include <sys/systm.h> 38 1.1 hannken #include <sys/kernel.h> 39 1.55 yamaguch #include <sys/atomic.h> 40 1.1 hannken #include <sys/bus.h> 41 1.1 hannken #include <sys/condvar.h> 42 1.1 hannken #include <sys/device.h> 43 1.63 yamaguch #include <sys/evcnt.h> 44 1.1 hannken #include <sys/intr.h> 45 1.1 hannken #include <sys/kmem.h> 46 1.1 hannken #include <sys/mbuf.h> 47 1.1 hannken #include <sys/mutex.h> 48 1.1 hannken #include <sys/sockio.h> 49 1.71 yamaguch #include <sys/syslog.h> 50 1.12 ozaki #include <sys/cpu.h> 51 1.26 pgoyette #include <sys/module.h> 52 1.46 yamaguch #include <sys/pcq.h> 53 1.55 yamaguch #include <sys/workqueue.h> 54 1.85 yamaguch #include <sys/xcall.h> 55 1.1 hannken 56 1.1 hannken #include <dev/pci/virtioreg.h> 57 1.1 hannken #include <dev/pci/virtiovar.h> 58 1.1 hannken 59 1.1 hannken #include <net/if.h> 60 1.74 yamaguch #include <net/if_dl.h> 61 1.1 hannken #include <net/if_media.h> 62 1.1 hannken #include <net/if_ether.h> 63 1.1 hannken 64 1.1 hannken #include <net/bpf.h> 65 1.1 hannken 66 1.26 pgoyette #include "ioconf.h" 67 1.1 hannken 68 1.7 ozaki #ifdef NET_MPSAFE 69 1.7 ozaki #define VIOIF_MPSAFE 1 70 1.46 yamaguch #define VIOIF_MULTIQ 1 71 1.7 ozaki #endif 72 1.7 ozaki 73 1.1 hannken /* 74 1.1 hannken * if_vioifreg.h: 75 1.1 hannken */ 76 1.1 hannken /* Configuration registers */ 77 1.66 reinoud #define VIRTIO_NET_CONFIG_MAC 0 /* 8bit x 6byte */ 78 1.66 reinoud #define VIRTIO_NET_CONFIG_STATUS 6 /* 16bit */ 79 1.66 reinoud #define VIRTIO_NET_CONFIG_MAX_VQ_PAIRS 8 /* 16bit */ 80 1.66 reinoud #define VIRTIO_NET_CONFIG_MTU 10 /* 16bit */ 81 1.1 hannken 82 1.1 hannken /* Feature bits */ 83 1.46 
yamaguch #define VIRTIO_NET_F_CSUM __BIT(0) 84 1.46 yamaguch #define VIRTIO_NET_F_GUEST_CSUM __BIT(1) 85 1.46 yamaguch #define VIRTIO_NET_F_MAC __BIT(5) 86 1.46 yamaguch #define VIRTIO_NET_F_GSO __BIT(6) 87 1.46 yamaguch #define VIRTIO_NET_F_GUEST_TSO4 __BIT(7) 88 1.46 yamaguch #define VIRTIO_NET_F_GUEST_TSO6 __BIT(8) 89 1.46 yamaguch #define VIRTIO_NET_F_GUEST_ECN __BIT(9) 90 1.46 yamaguch #define VIRTIO_NET_F_GUEST_UFO __BIT(10) 91 1.46 yamaguch #define VIRTIO_NET_F_HOST_TSO4 __BIT(11) 92 1.46 yamaguch #define VIRTIO_NET_F_HOST_TSO6 __BIT(12) 93 1.46 yamaguch #define VIRTIO_NET_F_HOST_ECN __BIT(13) 94 1.46 yamaguch #define VIRTIO_NET_F_HOST_UFO __BIT(14) 95 1.46 yamaguch #define VIRTIO_NET_F_MRG_RXBUF __BIT(15) 96 1.46 yamaguch #define VIRTIO_NET_F_STATUS __BIT(16) 97 1.46 yamaguch #define VIRTIO_NET_F_CTRL_VQ __BIT(17) 98 1.46 yamaguch #define VIRTIO_NET_F_CTRL_RX __BIT(18) 99 1.46 yamaguch #define VIRTIO_NET_F_CTRL_VLAN __BIT(19) 100 1.46 yamaguch #define VIRTIO_NET_F_CTRL_RX_EXTRA __BIT(20) 101 1.46 yamaguch #define VIRTIO_NET_F_GUEST_ANNOUNCE __BIT(21) 102 1.46 yamaguch #define VIRTIO_NET_F_MQ __BIT(22) 103 1.78 yamaguch #define VIRTIO_NET_F_CTRL_MAC_ADDR __BIT(23) 104 1.1 hannken 105 1.79 uwe #define VIRTIO_NET_FLAG_BITS \ 106 1.79 uwe VIRTIO_COMMON_FLAG_BITS \ 107 1.79 uwe "b\x17" "CTRL_MAC\0" \ 108 1.79 uwe "b\x16" "MQ\0" \ 109 1.79 uwe "b\x15" "GUEST_ANNOUNCE\0" \ 110 1.79 uwe "b\x14" "CTRL_RX_EXTRA\0" \ 111 1.79 uwe "b\x13" "CTRL_VLAN\0" \ 112 1.79 uwe "b\x12" "CTRL_RX\0" \ 113 1.79 uwe "b\x11" "CTRL_VQ\0" \ 114 1.79 uwe "b\x10" "STATUS\0" \ 115 1.79 uwe "b\x0f" "MRG_RXBUF\0" \ 116 1.79 uwe "b\x0e" "HOST_UFO\0" \ 117 1.79 uwe "b\x0d" "HOST_ECN\0" \ 118 1.79 uwe "b\x0c" "HOST_TSO6\0" \ 119 1.79 uwe "b\x0b" "HOST_TSO4\0" \ 120 1.79 uwe "b\x0a" "GUEST_UFO\0" \ 121 1.79 uwe "b\x09" "GUEST_ECN\0" \ 122 1.79 uwe "b\x08" "GUEST_TSO6\0" \ 123 1.79 uwe "b\x07" "GUEST_TSO4\0" \ 124 1.79 uwe "b\x06" "GSO\0" \ 125 1.79 uwe "b\x05" "MAC\0" \ 126 1.79 uwe "b\x01" 
"GUEST_CSUM\0" \ 127 1.79 uwe "b\x00" "CSUM\0" 128 1.18 christos 129 1.1 hannken /* Status */ 130 1.1 hannken #define VIRTIO_NET_S_LINK_UP 1 131 1.1 hannken 132 1.1 hannken /* Packet header structure */ 133 1.1 hannken struct virtio_net_hdr { 134 1.1 hannken uint8_t flags; 135 1.1 hannken uint8_t gso_type; 136 1.1 hannken uint16_t hdr_len; 137 1.1 hannken uint16_t gso_size; 138 1.1 hannken uint16_t csum_start; 139 1.1 hannken uint16_t csum_offset; 140 1.66 reinoud 141 1.66 reinoud uint16_t num_buffers; /* VIRTIO_NET_F_MRG_RXBUF enabled or v1 */ 142 1.1 hannken } __packed; 143 1.1 hannken 144 1.1 hannken #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* flags */ 145 1.1 hannken #define VIRTIO_NET_HDR_GSO_NONE 0 /* gso_type */ 146 1.1 hannken #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* gso_type */ 147 1.1 hannken #define VIRTIO_NET_HDR_GSO_UDP 3 /* gso_type */ 148 1.1 hannken #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* gso_type */ 149 1.1 hannken #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* gso_type, |'ed */ 150 1.1 hannken 151 1.1 hannken #define VIRTIO_NET_MAX_GSO_LEN (65536+ETHER_HDR_LEN) 152 1.1 hannken 153 1.1 hannken /* Control virtqueue */ 154 1.1 hannken struct virtio_net_ctrl_cmd { 155 1.1 hannken uint8_t class; 156 1.1 hannken uint8_t command; 157 1.1 hannken } __packed; 158 1.1 hannken #define VIRTIO_NET_CTRL_RX 0 159 1.1 hannken # define VIRTIO_NET_CTRL_RX_PROMISC 0 160 1.1 hannken # define VIRTIO_NET_CTRL_RX_ALLMULTI 1 161 1.1 hannken 162 1.1 hannken #define VIRTIO_NET_CTRL_MAC 1 163 1.1 hannken # define VIRTIO_NET_CTRL_MAC_TABLE_SET 0 164 1.74 yamaguch # define VIRTIO_NET_CTRL_MAC_ADDR_SET 1 165 1.1 hannken 166 1.1 hannken #define VIRTIO_NET_CTRL_VLAN 2 167 1.1 hannken # define VIRTIO_NET_CTRL_VLAN_ADD 0 168 1.1 hannken # define VIRTIO_NET_CTRL_VLAN_DEL 1 169 1.1 hannken 170 1.46 yamaguch #define VIRTIO_NET_CTRL_MQ 4 171 1.46 yamaguch # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0 172 1.46 yamaguch # define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 173 1.46 yamaguch # define 
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 174 1.46 yamaguch 175 1.1 hannken struct virtio_net_ctrl_status { 176 1.1 hannken uint8_t ack; 177 1.1 hannken } __packed; 178 1.1 hannken #define VIRTIO_NET_OK 0 179 1.1 hannken #define VIRTIO_NET_ERR 1 180 1.1 hannken 181 1.1 hannken struct virtio_net_ctrl_rx { 182 1.1 hannken uint8_t onoff; 183 1.1 hannken } __packed; 184 1.1 hannken 185 1.1 hannken struct virtio_net_ctrl_mac_tbl { 186 1.1 hannken uint32_t nentries; 187 1.1 hannken uint8_t macs[][ETHER_ADDR_LEN]; 188 1.1 hannken } __packed; 189 1.1 hannken 190 1.74 yamaguch struct virtio_net_ctrl_mac_addr { 191 1.74 yamaguch uint8_t mac[ETHER_ADDR_LEN]; 192 1.74 yamaguch } __packed; 193 1.74 yamaguch 194 1.1 hannken struct virtio_net_ctrl_vlan { 195 1.1 hannken uint16_t id; 196 1.1 hannken } __packed; 197 1.1 hannken 198 1.46 yamaguch struct virtio_net_ctrl_mq { 199 1.46 yamaguch uint16_t virtqueue_pairs; 200 1.46 yamaguch } __packed; 201 1.46 yamaguch 202 1.1 hannken /* 203 1.1 hannken * if_vioifvar.h: 204 1.1 hannken */ 205 1.43 yamaguch 206 1.43 yamaguch /* 207 1.43 yamaguch * Locking notes: 208 1.109 andvar * + a field in vioif_netqueue is protected by netq_lock (a spin mutex) 209 1.43 yamaguch * - more than one lock cannot be held at onece 210 1.98 yamaguch * + a field in vioif_tx_context and vioif_rx_context is also protected 211 1.98 yamaguch * by netq_lock. 212 1.43 yamaguch * + ctrlq_inuse is protected by ctrlq_wait_lock. 
213 1.43 yamaguch * - other fields in vioif_ctrlqueue are protected by ctrlq_inuse 214 1.98 yamaguch * - netq_lock cannot be held along with ctrlq_wait_lock 215 1.62 yamaguch * + fields in vioif_softc except queues are protected by 216 1.62 yamaguch * sc->sc_lock(an adaptive mutex) 217 1.62 yamaguch * - the lock is held before acquisition of other locks 218 1.43 yamaguch */ 219 1.43 yamaguch 220 1.66 reinoud struct vioif_ctrl_cmdspec { 221 1.66 reinoud bus_dmamap_t dmamap; 222 1.66 reinoud void *buf; 223 1.66 reinoud bus_size_t bufsize; 224 1.66 reinoud }; 225 1.66 reinoud 226 1.55 yamaguch struct vioif_work { 227 1.55 yamaguch struct work cookie; 228 1.55 yamaguch void (*func)(void *); 229 1.55 yamaguch void *arg; 230 1.55 yamaguch unsigned int added; 231 1.55 yamaguch }; 232 1.55 yamaguch 233 1.95 yamaguch struct vioif_net_map { 234 1.95 yamaguch struct virtio_net_hdr *vnm_hdr; 235 1.95 yamaguch bus_dmamap_t vnm_hdr_map; 236 1.95 yamaguch struct mbuf *vnm_mbuf; 237 1.95 yamaguch bus_dmamap_t vnm_mbuf_map; 238 1.95 yamaguch }; 239 1.95 yamaguch 240 1.98 yamaguch #define VIOIF_NETQ_RX 0 241 1.98 yamaguch #define VIOIF_NETQ_TX 1 242 1.98 yamaguch #define VIOIF_NETQ_IDX 2 243 1.98 yamaguch #define VIOIF_NETQ_DIR(n) ((n) % VIOIF_NETQ_IDX) 244 1.98 yamaguch #define VIOIF_NETQ_PAIRIDX(n) ((n) / VIOIF_NETQ_IDX) 245 1.98 yamaguch #define VIOIF_NETQ_RXQID(n) ((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_RX) 246 1.98 yamaguch #define VIOIF_NETQ_TXQID(n) ((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_TX) 247 1.98 yamaguch 248 1.98 yamaguch struct vioif_netqueue { 249 1.98 yamaguch kmutex_t netq_lock; 250 1.98 yamaguch struct virtqueue *netq_vq; 251 1.98 yamaguch bool netq_stopping; 252 1.98 yamaguch bool netq_running_handle; 253 1.98 yamaguch void *netq_maps_kva; 254 1.98 yamaguch struct vioif_net_map *netq_maps; 255 1.98 yamaguch 256 1.98 yamaguch void *netq_softint; 257 1.98 yamaguch struct vioif_work netq_work; 258 1.98 yamaguch bool netq_workqueue; 259 1.98 yamaguch 260 1.98 yamaguch char 
netq_evgroup[32]; 261 1.98 yamaguch struct evcnt netq_mbuf_load_failed; 262 1.99 yamaguch struct evcnt netq_enqueue_failed; 263 1.43 yamaguch 264 1.98 yamaguch void *netq_ctx; 265 1.43 yamaguch }; 266 1.43 yamaguch 267 1.98 yamaguch struct vioif_tx_context { 268 1.98 yamaguch bool txc_link_active; 269 1.102 yamaguch bool txc_no_free_slots; 270 1.98 yamaguch pcq_t *txc_intrq; 271 1.98 yamaguch void *txc_deferred_transmit; 272 1.43 yamaguch 273 1.98 yamaguch struct evcnt txc_defrag_failed; 274 1.115 ozaki struct evcnt txc_pcq_full; 275 1.98 yamaguch }; 276 1.43 yamaguch 277 1.98 yamaguch struct vioif_rx_context { 278 1.98 yamaguch struct evcnt rxc_mbuf_enobufs; 279 1.43 yamaguch }; 280 1.43 yamaguch struct vioif_ctrlqueue { 281 1.43 yamaguch struct virtqueue *ctrlq_vq; 282 1.43 yamaguch enum { 283 1.43 yamaguch FREE, INUSE, DONE 284 1.43 yamaguch } ctrlq_inuse; 285 1.43 yamaguch kcondvar_t ctrlq_wait; 286 1.43 yamaguch kmutex_t ctrlq_wait_lock; 287 1.44 yamaguch struct lwp *ctrlq_owner; 288 1.43 yamaguch 289 1.43 yamaguch struct virtio_net_ctrl_cmd *ctrlq_cmd; 290 1.43 yamaguch struct virtio_net_ctrl_status *ctrlq_status; 291 1.43 yamaguch struct virtio_net_ctrl_rx *ctrlq_rx; 292 1.43 yamaguch struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_uc; 293 1.43 yamaguch struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_mc; 294 1.74 yamaguch struct virtio_net_ctrl_mac_addr *ctrlq_mac_addr; 295 1.46 yamaguch struct virtio_net_ctrl_mq *ctrlq_mq; 296 1.43 yamaguch 297 1.43 yamaguch bus_dmamap_t ctrlq_cmd_dmamap; 298 1.43 yamaguch bus_dmamap_t ctrlq_status_dmamap; 299 1.43 yamaguch bus_dmamap_t ctrlq_rx_dmamap; 300 1.43 yamaguch bus_dmamap_t ctrlq_tbl_uc_dmamap; 301 1.43 yamaguch bus_dmamap_t ctrlq_tbl_mc_dmamap; 302 1.74 yamaguch bus_dmamap_t ctrlq_mac_addr_dmamap; 303 1.46 yamaguch bus_dmamap_t ctrlq_mq_dmamap; 304 1.63 yamaguch 305 1.63 yamaguch struct evcnt ctrlq_cmd_load_failed; 306 1.63 yamaguch struct evcnt ctrlq_cmd_failed; 307 1.43 yamaguch }; 308 1.43 yamaguch 309 1.1 hannken 
struct vioif_softc { 310 1.1 hannken device_t sc_dev; 311 1.62 yamaguch kmutex_t sc_lock; 312 1.55 yamaguch struct sysctllog *sc_sysctllog; 313 1.1 hannken 314 1.1 hannken struct virtio_softc *sc_virtio; 315 1.46 yamaguch struct virtqueue *sc_vqs; 316 1.66 reinoud u_int sc_hdr_size; 317 1.46 yamaguch 318 1.46 yamaguch int sc_max_nvq_pairs; 319 1.46 yamaguch int sc_req_nvq_pairs; 320 1.46 yamaguch int sc_act_nvq_pairs; 321 1.1 hannken 322 1.1 hannken uint8_t sc_mac[ETHER_ADDR_LEN]; 323 1.1 hannken struct ethercom sc_ethercom; 324 1.82 knakahar int sc_link_state; 325 1.1 hannken 326 1.98 yamaguch struct vioif_netqueue *sc_netqs; 327 1.43 yamaguch 328 1.43 yamaguch bool sc_has_ctrl; 329 1.43 yamaguch struct vioif_ctrlqueue sc_ctrlq; 330 1.43 yamaguch 331 1.100 yamaguch bus_dma_segment_t sc_segs[1]; 332 1.43 yamaguch void *sc_dmamem; 333 1.43 yamaguch void *sc_kmem; 334 1.1 hannken 335 1.101 yamaguch void *sc_cfg_softint; 336 1.55 yamaguch 337 1.55 yamaguch struct workqueue *sc_txrx_workqueue; 338 1.55 yamaguch bool sc_txrx_workqueue_sysctl; 339 1.55 yamaguch u_int sc_tx_intr_process_limit; 340 1.55 yamaguch u_int sc_tx_process_limit; 341 1.55 yamaguch u_int sc_rx_intr_process_limit; 342 1.55 yamaguch u_int sc_rx_process_limit; 343 1.1 hannken }; 344 1.1 hannken #define VIRTIO_NET_TX_MAXNSEGS (16) /* XXX */ 345 1.1 hannken #define VIRTIO_NET_CTRL_MAC_MAXENTRIES (64) /* XXX */ 346 1.1 hannken 347 1.55 yamaguch #define VIOIF_TX_INTR_PROCESS_LIMIT 256 348 1.55 yamaguch #define VIOIF_TX_PROCESS_LIMIT 256 349 1.55 yamaguch #define VIOIF_RX_INTR_PROCESS_LIMIT 0U 350 1.55 yamaguch #define VIOIF_RX_PROCESS_LIMIT 256 351 1.55 yamaguch 352 1.55 yamaguch #define VIOIF_WORKQUEUE_PRI PRI_SOFTNET 353 1.82 knakahar #define VIOIF_IS_LINK_ACTIVE(_sc) ((_sc)->sc_link_state == LINK_STATE_UP ? 
\ 354 1.82 knakahar true : false) 355 1.55 yamaguch 356 1.1 hannken /* cfattach interface functions */ 357 1.1 hannken static int vioif_match(device_t, cfdata_t, void *); 358 1.1 hannken static void vioif_attach(device_t, device_t, void *); 359 1.55 yamaguch static int vioif_finalize_teardown(device_t); 360 1.1 hannken 361 1.1 hannken /* ifnet interface functions */ 362 1.1 hannken static int vioif_init(struct ifnet *); 363 1.1 hannken static void vioif_stop(struct ifnet *, int); 364 1.1 hannken static void vioif_start(struct ifnet *); 365 1.46 yamaguch static int vioif_transmit(struct ifnet *, struct mbuf *); 366 1.1 hannken static int vioif_ioctl(struct ifnet *, u_long, void *); 367 1.1 hannken static void vioif_watchdog(struct ifnet *); 368 1.101 yamaguch static int vioif_ifflags(struct vioif_softc *); 369 1.75 yamaguch static int vioif_ifflags_cb(struct ethercom *); 370 1.1 hannken 371 1.98 yamaguch /* tx & rx */ 372 1.101 yamaguch static int vioif_netqueue_init(struct vioif_softc *, 373 1.101 yamaguch struct virtio_softc *, size_t, u_int); 374 1.101 yamaguch static void vioif_netqueue_teardown(struct vioif_softc *, 375 1.101 yamaguch struct virtio_softc *, size_t); 376 1.99 yamaguch static void vioif_net_intr_enable(struct vioif_softc *, 377 1.99 yamaguch struct virtio_softc *); 378 1.99 yamaguch static void vioif_net_intr_disable(struct vioif_softc *, 379 1.99 yamaguch struct virtio_softc *); 380 1.101 yamaguch static void vioif_net_sched_handle(struct vioif_softc *, 381 1.101 yamaguch struct vioif_netqueue *); 382 1.98 yamaguch 383 1.1 hannken /* rx */ 384 1.66 reinoud static void vioif_populate_rx_mbufs_locked(struct vioif_softc *, 385 1.98 yamaguch struct vioif_netqueue *); 386 1.54 yamaguch static int vioif_rx_intr(void *); 387 1.55 yamaguch static void vioif_rx_handle(void *); 388 1.101 yamaguch static void vioif_rx_queue_clear(struct vioif_softc *, 389 1.101 yamaguch struct virtio_softc *, struct vioif_netqueue *); 390 1.1 hannken 391 1.1 hannken /* tx 
*/ 392 1.101 yamaguch static void vioif_start_locked(struct ifnet *, struct vioif_netqueue *); 393 1.101 yamaguch static void vioif_transmit_locked(struct ifnet *, struct vioif_netqueue *); 394 1.101 yamaguch static void vioif_deferred_transmit(void *); 395 1.54 yamaguch static int vioif_tx_intr(void *); 396 1.55 yamaguch static void vioif_tx_handle(void *); 397 1.93 yamaguch static void vioif_tx_queue_clear(struct vioif_softc *, struct virtio_softc *, 398 1.98 yamaguch struct vioif_netqueue *); 399 1.1 hannken 400 1.101 yamaguch /* controls */ 401 1.101 yamaguch static int vioif_ctrl_intr(void *); 402 1.1 hannken static int vioif_ctrl_rx(struct vioif_softc *, int, bool); 403 1.1 hannken static int vioif_set_promisc(struct vioif_softc *, bool); 404 1.1 hannken static int vioif_set_allmulti(struct vioif_softc *, bool); 405 1.1 hannken static int vioif_set_rx_filter(struct vioif_softc *); 406 1.1 hannken static int vioif_rx_filter(struct vioif_softc *); 407 1.74 yamaguch static int vioif_set_mac_addr(struct vioif_softc *); 408 1.101 yamaguch static int vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *, int); 409 1.101 yamaguch 410 1.101 yamaguch /* config interrupt */ 411 1.34 ozaki static int vioif_config_change(struct virtio_softc *); 412 1.101 yamaguch static void vioif_cfg_softint(void *); 413 1.101 yamaguch static void vioif_update_link_status(struct vioif_softc *); 414 1.101 yamaguch 415 1.101 yamaguch /* others */ 416 1.101 yamaguch static void vioif_alloc_queues(struct vioif_softc *); 417 1.101 yamaguch static void vioif_free_queues(struct vioif_softc *); 418 1.101 yamaguch static int vioif_alloc_mems(struct vioif_softc *); 419 1.101 yamaguch static struct workqueue* 420 1.101 yamaguch vioif_workq_create(const char *, pri_t, int, int); 421 1.101 yamaguch static void vioif_workq_destroy(struct workqueue *); 422 1.101 yamaguch static void vioif_work_set(struct vioif_work *, void(*)(void *), void *); 423 1.101 yamaguch static void vioif_work_add(struct workqueue 
*, struct vioif_work *); 424 1.101 yamaguch static void vioif_work_wait(struct workqueue *, struct vioif_work *); 425 1.55 yamaguch static int vioif_setup_sysctl(struct vioif_softc *); 426 1.63 yamaguch static void vioif_setup_stats(struct vioif_softc *); 427 1.1 hannken 428 1.1 hannken CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc), 429 1.1 hannken vioif_match, vioif_attach, NULL, NULL); 430 1.1 hannken 431 1.101 yamaguch static void 432 1.101 yamaguch vioif_intr_barrier(void) 433 1.101 yamaguch { 434 1.101 yamaguch 435 1.101 yamaguch /* wait for finish all interrupt handler */ 436 1.101 yamaguch xc_barrier(0); 437 1.101 yamaguch } 438 1.101 yamaguch 439 1.101 yamaguch static void 440 1.101 yamaguch vioif_notify(struct virtio_softc *vsc, struct virtqueue *vq) 441 1.101 yamaguch { 442 1.101 yamaguch 443 1.101 yamaguch virtio_enqueue_commit(vsc, vq, -1, true); 444 1.101 yamaguch } 445 1.101 yamaguch 446 1.1 hannken static int 447 1.1 hannken vioif_match(device_t parent, cfdata_t match, void *aux) 448 1.1 hannken { 449 1.32 jdolecek struct virtio_attach_args *va = aux; 450 1.1 hannken 451 1.66 reinoud if (va->sc_childdevid == VIRTIO_DEVICE_ID_NETWORK) 452 1.1 hannken return 1; 453 1.1 hannken 454 1.1 hannken return 0; 455 1.1 hannken } 456 1.1 hannken 457 1.101 yamaguch static void 458 1.101 yamaguch vioif_attach(device_t parent, device_t self, void *aux) 459 1.58 yamaguch { 460 1.101 yamaguch struct vioif_softc *sc = device_private(self); 461 1.101 yamaguch struct virtio_softc *vsc = device_private(parent); 462 1.101 yamaguch struct vioif_netqueue *txq0; 463 1.101 yamaguch struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; 464 1.101 yamaguch uint64_t features, req_features; 465 1.101 yamaguch struct ifnet *ifp = &sc->sc_ethercom.ec_if; 466 1.101 yamaguch u_int softint_flags; 467 1.101 yamaguch int r, i, req_flags; 468 1.101 yamaguch char xnamebuf[MAXCOMLEN]; 469 1.103 yamaguch size_t nvqs; 470 1.101 yamaguch 471 1.101 yamaguch if (virtio_child(vsc) != NULL) { 472 
1.101 yamaguch aprint_normal(": child already attached for %s; " 473 1.101 yamaguch "something wrong...\n", device_xname(parent)); 474 1.101 yamaguch return; 475 1.101 yamaguch } 476 1.101 yamaguch 477 1.101 yamaguch sc->sc_dev = self; 478 1.101 yamaguch sc->sc_virtio = vsc; 479 1.101 yamaguch sc->sc_link_state = LINK_STATE_UNKNOWN; 480 1.101 yamaguch 481 1.101 yamaguch sc->sc_max_nvq_pairs = 1; 482 1.101 yamaguch sc->sc_req_nvq_pairs = 1; 483 1.101 yamaguch sc->sc_act_nvq_pairs = 1; 484 1.101 yamaguch sc->sc_txrx_workqueue_sysctl = true; 485 1.101 yamaguch sc->sc_tx_intr_process_limit = VIOIF_TX_INTR_PROCESS_LIMIT; 486 1.101 yamaguch sc->sc_tx_process_limit = VIOIF_TX_PROCESS_LIMIT; 487 1.101 yamaguch sc->sc_rx_intr_process_limit = VIOIF_RX_INTR_PROCESS_LIMIT; 488 1.101 yamaguch sc->sc_rx_process_limit = VIOIF_RX_PROCESS_LIMIT; 489 1.101 yamaguch 490 1.101 yamaguch mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE); 491 1.101 yamaguch 492 1.101 yamaguch snprintf(xnamebuf, sizeof(xnamebuf), "%s_txrx", device_xname(self)); 493 1.101 yamaguch sc->sc_txrx_workqueue = vioif_workq_create(xnamebuf, VIOIF_WORKQUEUE_PRI, 494 1.101 yamaguch IPL_NET, WQ_PERCPU | WQ_MPSAFE); 495 1.101 yamaguch if (sc->sc_txrx_workqueue == NULL) 496 1.101 yamaguch goto err; 497 1.58 yamaguch 498 1.101 yamaguch req_flags = 0; 499 1.58 yamaguch 500 1.101 yamaguch #ifdef VIOIF_MPSAFE 501 1.101 yamaguch req_flags |= VIRTIO_F_INTR_MPSAFE; 502 1.101 yamaguch #endif 503 1.101 yamaguch req_flags |= VIRTIO_F_INTR_MSIX; 504 1.58 yamaguch 505 1.101 yamaguch req_features = 506 1.101 yamaguch VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ | 507 1.101 yamaguch VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY; 508 1.101 yamaguch req_features |= VIRTIO_F_RING_EVENT_IDX; 509 1.101 yamaguch req_features |= VIRTIO_NET_F_CTRL_MAC_ADDR; 510 1.101 yamaguch #ifdef VIOIF_MULTIQ 511 1.101 yamaguch req_features |= VIRTIO_NET_F_MQ; 512 1.101 yamaguch #endif 513 1.103 yamaguch 514 1.103 yamaguch 
virtio_child_attach_start(vsc, self, IPL_NET, 515 1.101 yamaguch req_features, VIRTIO_NET_FLAG_BITS); 516 1.103 yamaguch features = virtio_features(vsc); 517 1.58 yamaguch 518 1.101 yamaguch if (features == 0) 519 1.101 yamaguch goto err; 520 1.58 yamaguch 521 1.101 yamaguch if (features & VIRTIO_NET_F_MAC) { 522 1.101 yamaguch for (i = 0; i < __arraycount(sc->sc_mac); i++) { 523 1.101 yamaguch sc->sc_mac[i] = virtio_read_device_config_1(vsc, 524 1.101 yamaguch VIRTIO_NET_CONFIG_MAC + i); 525 1.101 yamaguch } 526 1.101 yamaguch } else { 527 1.101 yamaguch /* code stolen from sys/net/if_tap.c */ 528 1.101 yamaguch struct timeval tv; 529 1.101 yamaguch uint32_t ui; 530 1.101 yamaguch getmicrouptime(&tv); 531 1.101 yamaguch ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff; 532 1.101 yamaguch memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3); 533 1.101 yamaguch for (i = 0; i < __arraycount(sc->sc_mac); i++) { 534 1.101 yamaguch virtio_write_device_config_1(vsc, 535 1.101 yamaguch VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]); 536 1.101 yamaguch } 537 1.58 yamaguch } 538 1.58 yamaguch 539 1.101 yamaguch /* 'Ethernet' with capital follows other ethernet driver attachment */ 540 1.101 yamaguch aprint_normal_dev(self, "Ethernet address %s\n", 541 1.101 yamaguch ether_sprintf(sc->sc_mac)); 542 1.101 yamaguch 543 1.101 yamaguch if (features & (VIRTIO_NET_F_MRG_RXBUF | VIRTIO_F_VERSION_1)) { 544 1.101 yamaguch sc->sc_hdr_size = sizeof(struct virtio_net_hdr); 545 1.101 yamaguch } else { 546 1.101 yamaguch sc->sc_hdr_size = offsetof(struct virtio_net_hdr, num_buffers); 547 1.101 yamaguch } 548 1.58 yamaguch 549 1.101 yamaguch if ((features & VIRTIO_NET_F_CTRL_VQ) && 550 1.101 yamaguch (features & VIRTIO_NET_F_CTRL_RX)) { 551 1.101 yamaguch sc->sc_has_ctrl = true; 552 1.58 yamaguch 553 1.101 yamaguch cv_init(&ctrlq->ctrlq_wait, "ctrl_vq"); 554 1.101 yamaguch mutex_init(&ctrlq->ctrlq_wait_lock, MUTEX_DEFAULT, IPL_NET); 555 1.101 yamaguch ctrlq->ctrlq_inuse = FREE; 556 1.101 yamaguch } else { 557 1.101 
yamaguch sc->sc_has_ctrl = false; 558 1.58 yamaguch } 559 1.58 yamaguch 560 1.101 yamaguch if (sc->sc_has_ctrl && (features & VIRTIO_NET_F_MQ)) { 561 1.101 yamaguch sc->sc_max_nvq_pairs = virtio_read_device_config_2(vsc, 562 1.101 yamaguch VIRTIO_NET_CONFIG_MAX_VQ_PAIRS); 563 1.101 yamaguch 564 1.101 yamaguch if (sc->sc_max_nvq_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) 565 1.101 yamaguch goto err; 566 1.58 yamaguch 567 1.101 yamaguch /* Limit the number of queue pairs to use */ 568 1.101 yamaguch sc->sc_req_nvq_pairs = MIN(sc->sc_max_nvq_pairs, ncpu); 569 1.103 yamaguch 570 1.103 yamaguch if (sc->sc_max_nvq_pairs > 1) 571 1.103 yamaguch req_flags |= VIRTIO_F_INTR_PERVQ; 572 1.101 yamaguch } 573 1.58 yamaguch 574 1.101 yamaguch vioif_alloc_queues(sc); 575 1.58 yamaguch 576 1.101 yamaguch #ifdef VIOIF_MPSAFE 577 1.101 yamaguch softint_flags = SOFTINT_NET | SOFTINT_MPSAFE; 578 1.101 yamaguch #else 579 1.101 yamaguch softint_flags = SOFTINT_NET; 580 1.101 yamaguch #endif 581 1.58 yamaguch 582 1.101 yamaguch /* 583 1.101 yamaguch * Initialize network queues 584 1.101 yamaguch */ 585 1.103 yamaguch nvqs = sc->sc_max_nvq_pairs * 2; 586 1.103 yamaguch for (i = 0; i < nvqs; i++) { 587 1.101 yamaguch r = vioif_netqueue_init(sc, vsc, i, softint_flags); 588 1.101 yamaguch if (r != 0) 589 1.101 yamaguch goto err; 590 1.101 yamaguch } 591 1.46 yamaguch 592 1.101 yamaguch if (sc->sc_has_ctrl) { 593 1.103 yamaguch int ctrlq_idx = nvqs; 594 1.103 yamaguch 595 1.103 yamaguch nvqs++; 596 1.101 yamaguch /* 597 1.101 yamaguch * Allocating a virtqueue for control channel 598 1.101 yamaguch */ 599 1.101 yamaguch sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[ctrlq_idx]; 600 1.104 yamaguch virtio_init_vq(vsc, ctrlq->ctrlq_vq, ctrlq_idx, 601 1.104 yamaguch vioif_ctrl_intr, ctrlq); 602 1.104 yamaguch 603 1.104 yamaguch r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, NBPG, 1, "control"); 604 1.101 yamaguch if (r != 0) { 605 1.101 yamaguch aprint_error_dev(self, "failed to allocate " 606 1.101 yamaguch "a 
virtqueue for control channel, error code %d\n", 607 1.101 yamaguch r); 608 1.46 yamaguch 609 1.101 yamaguch sc->sc_has_ctrl = false; 610 1.101 yamaguch cv_destroy(&ctrlq->ctrlq_wait); 611 1.101 yamaguch mutex_destroy(&ctrlq->ctrlq_wait_lock); 612 1.101 yamaguch } 613 1.101 yamaguch } 614 1.46 yamaguch 615 1.101 yamaguch sc->sc_cfg_softint = softint_establish(softint_flags, 616 1.101 yamaguch vioif_cfg_softint, sc); 617 1.101 yamaguch if (sc->sc_cfg_softint == NULL) { 618 1.101 yamaguch aprint_error_dev(self, "cannot establish ctl softint\n"); 619 1.101 yamaguch goto err; 620 1.101 yamaguch } 621 1.46 yamaguch 622 1.101 yamaguch if (vioif_alloc_mems(sc) < 0) 623 1.101 yamaguch goto err; 624 1.46 yamaguch 625 1.103 yamaguch r = virtio_child_attach_finish(vsc, sc->sc_vqs, nvqs, 626 1.104 yamaguch vioif_config_change, req_flags); 627 1.103 yamaguch if (r != 0) 628 1.101 yamaguch goto err; 629 1.46 yamaguch 630 1.101 yamaguch if (vioif_setup_sysctl(sc) != 0) { 631 1.101 yamaguch aprint_error_dev(self, "unable to create sysctl node\n"); 632 1.101 yamaguch /* continue */ 633 1.101 yamaguch } 634 1.98 yamaguch 635 1.101 yamaguch vioif_setup_stats(sc); 636 1.98 yamaguch 637 1.101 yamaguch strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 638 1.101 yamaguch ifp->if_softc = sc; 639 1.101 yamaguch ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 640 1.101 yamaguch #ifdef VIOIF_MPSAFE 641 1.101 yamaguch ifp->if_extflags = IFEF_MPSAFE; 642 1.101 yamaguch #endif 643 1.101 yamaguch ifp->if_start = vioif_start; 644 1.101 yamaguch if (sc->sc_req_nvq_pairs > 1) 645 1.101 yamaguch ifp->if_transmit = vioif_transmit; 646 1.101 yamaguch ifp->if_ioctl = vioif_ioctl; 647 1.101 yamaguch ifp->if_init = vioif_init; 648 1.101 yamaguch ifp->if_stop = vioif_stop; 649 1.101 yamaguch ifp->if_capabilities = 0; 650 1.101 yamaguch ifp->if_watchdog = vioif_watchdog; 651 1.101 yamaguch txq0 = &sc->sc_netqs[VIOIF_NETQ_TXQID(0)]; 652 1.101 yamaguch IFQ_SET_MAXLEN(&ifp->if_snd, 
MAX(txq0->netq_vq->vq_num, IFQ_MAXLEN)); 653 1.101 yamaguch IFQ_SET_READY(&ifp->if_snd); 654 1.98 yamaguch 655 1.101 yamaguch sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 656 1.98 yamaguch 657 1.101 yamaguch if_attach(ifp); 658 1.101 yamaguch if_deferred_start_init(ifp, NULL); 659 1.101 yamaguch ether_ifattach(ifp, sc->sc_mac); 660 1.101 yamaguch ether_set_ifflags_cb(&sc->sc_ethercom, vioif_ifflags_cb); 661 1.98 yamaguch 662 1.101 yamaguch return; 663 1.98 yamaguch 664 1.98 yamaguch err: 665 1.103 yamaguch nvqs = sc->sc_max_nvq_pairs * 2; 666 1.103 yamaguch for (i = 0; i < nvqs; i++) { 667 1.101 yamaguch vioif_netqueue_teardown(sc, vsc, i); 668 1.98 yamaguch } 669 1.98 yamaguch 670 1.101 yamaguch if (sc->sc_has_ctrl) { 671 1.101 yamaguch cv_destroy(&ctrlq->ctrlq_wait); 672 1.101 yamaguch mutex_destroy(&ctrlq->ctrlq_wait_lock); 673 1.101 yamaguch virtio_free_vq(vsc, ctrlq->ctrlq_vq); 674 1.101 yamaguch ctrlq->ctrlq_vq = NULL; 675 1.98 yamaguch } 676 1.98 yamaguch 677 1.101 yamaguch vioif_free_queues(sc); 678 1.101 yamaguch mutex_destroy(&sc->sc_lock); 679 1.101 yamaguch virtio_child_attach_failed(vsc); 680 1.101 yamaguch config_finalize_register(self, vioif_finalize_teardown); 681 1.98 yamaguch 682 1.101 yamaguch return; 683 1.98 yamaguch } 684 1.98 yamaguch 685 1.101 yamaguch static int 686 1.101 yamaguch vioif_finalize_teardown(device_t self) 687 1.98 yamaguch { 688 1.101 yamaguch struct vioif_softc *sc = device_private(self); 689 1.98 yamaguch 690 1.101 yamaguch if (sc->sc_txrx_workqueue != NULL) { 691 1.101 yamaguch vioif_workq_destroy(sc->sc_txrx_workqueue); 692 1.101 yamaguch sc->sc_txrx_workqueue = NULL; 693 1.46 yamaguch } 694 1.98 yamaguch 695 1.101 yamaguch return 0; 696 1.46 yamaguch } 697 1.46 yamaguch 698 1.1 hannken /* 699 1.101 yamaguch * Interface functions for ifnet 700 1.1 hannken */ 701 1.1 hannken static int 702 1.101 yamaguch vioif_init(struct ifnet *ifp) 703 1.1 hannken { 704 1.101 yamaguch struct vioif_softc *sc = ifp->if_softc; 705 
1.1 hannken struct virtio_softc *vsc = sc->sc_virtio; 706 1.98 yamaguch struct vioif_netqueue *netq; 707 1.98 yamaguch struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; 708 1.101 yamaguch int r, i; 709 1.101 yamaguch 710 1.101 yamaguch vioif_stop(ifp, 0); 711 1.101 yamaguch 712 1.101 yamaguch r = virtio_reinit_start(vsc); 713 1.101 yamaguch if (r != 0) { 714 1.101 yamaguch log(LOG_ERR, "%s: reset failed\n", ifp->if_xname); 715 1.101 yamaguch return EIO; 716 1.101 yamaguch } 717 1.1 hannken 718 1.101 yamaguch virtio_negotiate_features(vsc, virtio_features(vsc)); 719 1.98 yamaguch 720 1.101 yamaguch for (i = 0; i < sc->sc_req_nvq_pairs; i++) { 721 1.101 yamaguch netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)]; 722 1.46 yamaguch 723 1.101 yamaguch mutex_enter(&netq->netq_lock); 724 1.101 yamaguch vioif_populate_rx_mbufs_locked(sc, netq); 725 1.101 yamaguch mutex_exit(&netq->netq_lock); 726 1.46 yamaguch } 727 1.98 yamaguch 728 1.101 yamaguch virtio_reinit_end(vsc); 729 1.101 yamaguch 730 1.101 yamaguch if (sc->sc_has_ctrl) 731 1.101 yamaguch virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq); 732 1.98 yamaguch 733 1.101 yamaguch r = vioif_ctrl_mq_vq_pairs_set(sc, sc->sc_req_nvq_pairs); 734 1.101 yamaguch if (r == 0) 735 1.101 yamaguch sc->sc_act_nvq_pairs = sc->sc_req_nvq_pairs; 736 1.101 yamaguch else 737 1.101 yamaguch sc->sc_act_nvq_pairs = 1; 738 1.101 yamaguch 739 1.101 yamaguch SET(ifp->if_flags, IFF_RUNNING); 740 1.42 yamaguch 741 1.101 yamaguch vioif_net_intr_enable(sc, vsc); 742 1.42 yamaguch 743 1.101 yamaguch vioif_update_link_status(sc); 744 1.101 yamaguch r = vioif_rx_filter(sc); 745 1.98 yamaguch 746 1.101 yamaguch return r; 747 1.101 yamaguch } 748 1.98 yamaguch 749 1.101 yamaguch static void 750 1.101 yamaguch vioif_stop(struct ifnet *ifp, int disable) 751 1.101 yamaguch { 752 1.101 yamaguch struct vioif_softc *sc = ifp->if_softc; 753 1.101 yamaguch struct virtio_softc *vsc = sc->sc_virtio; 754 1.101 yamaguch struct vioif_netqueue *netq; 755 1.101 yamaguch struct 
vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; 756 1.101 yamaguch size_t i, act_qnum; 757 1.1 hannken 758 1.101 yamaguch act_qnum = sc->sc_act_nvq_pairs * 2; 759 1.46 yamaguch 760 1.101 yamaguch CLR(ifp->if_flags, IFF_RUNNING); 761 1.101 yamaguch for (i = 0; i < act_qnum; i++) { 762 1.101 yamaguch netq = &sc->sc_netqs[i]; 763 1.95 yamaguch 764 1.101 yamaguch mutex_enter(&netq->netq_lock); 765 1.101 yamaguch netq->netq_stopping = true; 766 1.101 yamaguch mutex_exit(&netq->netq_lock); 767 1.98 yamaguch } 768 1.46 yamaguch 769 1.101 yamaguch /* disable interrupts */ 770 1.101 yamaguch vioif_net_intr_disable(sc, vsc); 771 1.101 yamaguch if (sc->sc_has_ctrl) 772 1.101 yamaguch virtio_stop_vq_intr(vsc, ctrlq->ctrlq_vq); 773 1.101 yamaguch 774 1.101 yamaguch /* 775 1.101 yamaguch * only way to stop interrupt, I/O and DMA is resetting... 776 1.101 yamaguch * 777 1.101 yamaguch * NOTE: Devices based on VirtIO draft specification can not 778 1.101 yamaguch * stop interrupt completely even if virtio_stop_vq_intr() is called. 
779 1.101 yamaguch */ 780 1.101 yamaguch virtio_reset(vsc); 781 1.101 yamaguch 782 1.101 yamaguch vioif_intr_barrier(); 783 1.98 yamaguch 784 1.101 yamaguch for (i = 0; i < act_qnum; i++) { 785 1.101 yamaguch netq = &sc->sc_netqs[i]; 786 1.101 yamaguch vioif_work_wait(sc->sc_txrx_workqueue, &netq->netq_work); 787 1.101 yamaguch } 788 1.42 yamaguch 789 1.101 yamaguch for (i = 0; i < sc->sc_act_nvq_pairs; i++) { 790 1.101 yamaguch netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)]; 791 1.101 yamaguch vioif_rx_queue_clear(sc, vsc, netq); 792 1.98 yamaguch 793 1.101 yamaguch netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)]; 794 1.101 yamaguch vioif_tx_queue_clear(sc, vsc, netq); 795 1.98 yamaguch } 796 1.98 yamaguch 797 1.101 yamaguch /* all packet processing is stopped */ 798 1.101 yamaguch for (i = 0; i < act_qnum; i++) { 799 1.101 yamaguch netq = &sc->sc_netqs[i]; 800 1.1 hannken 801 1.101 yamaguch mutex_enter(&netq->netq_lock); 802 1.101 yamaguch netq->netq_stopping = false; 803 1.101 yamaguch mutex_exit(&netq->netq_lock); 804 1.101 yamaguch } 805 1.101 yamaguch } 806 1.95 yamaguch 807 1.101 yamaguch static void 808 1.101 yamaguch vioif_start(struct ifnet *ifp) 809 1.101 yamaguch { 810 1.101 yamaguch struct vioif_softc *sc = ifp->if_softc; 811 1.101 yamaguch struct vioif_netqueue *txq0 = &sc->sc_netqs[VIOIF_NETQ_TXQID(0)]; 812 1.58 yamaguch 813 1.101 yamaguch #ifdef VIOIF_MPSAFE 814 1.101 yamaguch KASSERT(if_is_mpsafe(ifp)); 815 1.101 yamaguch #endif 816 1.58 yamaguch 817 1.101 yamaguch mutex_enter(&txq0->netq_lock); 818 1.101 yamaguch vioif_start_locked(ifp, txq0); 819 1.101 yamaguch mutex_exit(&txq0->netq_lock); 820 1.101 yamaguch } 821 1.1 hannken 822 1.101 yamaguch static inline int 823 1.101 yamaguch vioif_select_txqueue(struct ifnet *ifp, struct mbuf *m) 824 1.101 yamaguch { 825 1.101 yamaguch struct vioif_softc *sc = ifp->if_softc; 826 1.101 yamaguch u_int cpuid = cpu_index(curcpu()); 827 1.58 yamaguch 828 1.101 yamaguch return VIOIF_NETQ_TXQID(cpuid % 
sc->sc_act_nvq_pairs); 829 1.101 yamaguch } 830 1.1 hannken 831 1.101 yamaguch static int 832 1.101 yamaguch vioif_transmit(struct ifnet *ifp, struct mbuf *m) 833 1.101 yamaguch { 834 1.101 yamaguch struct vioif_softc *sc = ifp->if_softc; 835 1.101 yamaguch struct vioif_netqueue *netq; 836 1.101 yamaguch struct vioif_tx_context *txc; 837 1.101 yamaguch int qid; 838 1.1 hannken 839 1.101 yamaguch qid = vioif_select_txqueue(ifp, m); 840 1.101 yamaguch netq = &sc->sc_netqs[qid]; 841 1.101 yamaguch txc = netq->netq_ctx; 842 1.46 yamaguch 843 1.101 yamaguch if (__predict_false(!pcq_put(txc->txc_intrq, m))) { 844 1.115 ozaki txc->txc_pcq_full.ev_count++; 845 1.101 yamaguch m_freem(m); 846 1.101 yamaguch return ENOBUFS; 847 1.101 yamaguch } 848 1.1 hannken 849 1.101 yamaguch net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 850 1.112 riastrad if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len); 851 1.101 yamaguch if (m->m_flags & M_MCAST) 852 1.112 riastrad if_statinc_ref(ifp, nsr, if_omcasts); 853 1.101 yamaguch IF_STAT_PUTREF(ifp); 854 1.74 yamaguch 855 1.101 yamaguch if (mutex_tryenter(&netq->netq_lock)) { 856 1.101 yamaguch vioif_transmit_locked(ifp, netq); 857 1.101 yamaguch mutex_exit(&netq->netq_lock); 858 1.1 hannken } 859 1.1 hannken 860 1.1 hannken return 0; 861 1.101 yamaguch } 862 1.1 hannken 863 1.101 yamaguch void 864 1.101 yamaguch vioif_watchdog(struct ifnet *ifp) 865 1.101 yamaguch { 866 1.101 yamaguch struct vioif_softc *sc = ifp->if_softc; 867 1.101 yamaguch struct vioif_netqueue *netq; 868 1.101 yamaguch int i; 869 1.101 yamaguch 870 1.102 yamaguch if (ISSET(ifp->if_flags, IFF_RUNNING)) { 871 1.102 yamaguch if (ISSET(ifp->if_flags, IFF_DEBUG)) { 872 1.102 yamaguch log(LOG_DEBUG, "%s: watchdog timed out\n", 873 1.102 yamaguch ifp->if_xname); 874 1.102 yamaguch } 875 1.102 yamaguch 876 1.101 yamaguch for (i = 0; i < sc->sc_act_nvq_pairs; i++) { 877 1.101 yamaguch netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)]; 878 1.46 yamaguch 879 1.101 yamaguch 
mutex_enter(&netq->netq_lock); 880 1.101 yamaguch if (!netq->netq_running_handle) { 881 1.101 yamaguch netq->netq_running_handle = true; 882 1.101 yamaguch vioif_net_sched_handle(sc, netq); 883 1.101 yamaguch } 884 1.101 yamaguch mutex_exit(&netq->netq_lock); 885 1.46 yamaguch } 886 1.1 hannken } 887 1.101 yamaguch } 888 1.101 yamaguch 889 1.101 yamaguch static int 890 1.101 yamaguch vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data) 891 1.101 yamaguch { 892 1.101 yamaguch int s, r; 893 1.101 yamaguch 894 1.101 yamaguch s = splnet(); 895 1.101 yamaguch 896 1.101 yamaguch r = ether_ioctl(ifp, cmd, data); 897 1.101 yamaguch if (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI)) { 898 1.101 yamaguch if (ifp->if_flags & IFF_RUNNING) { 899 1.101 yamaguch r = vioif_rx_filter(ifp->if_softc); 900 1.101 yamaguch } else { 901 1.101 yamaguch r = 0; 902 1.101 yamaguch } 903 1.1 hannken } 904 1.101 yamaguch 905 1.101 yamaguch splx(s); 906 1.101 yamaguch 907 1.101 yamaguch return r; 908 1.1 hannken } 909 1.1 hannken 910 1.101 yamaguch static int 911 1.101 yamaguch vioif_ifflags(struct vioif_softc *sc) 912 1.1 hannken { 913 1.1 hannken struct ifnet *ifp = &sc->sc_ethercom.ec_if; 914 1.101 yamaguch bool onoff; 915 1.101 yamaguch int r; 916 1.1 hannken 917 1.101 yamaguch if (!sc->sc_has_ctrl) { 918 1.101 yamaguch /* no ctrl vq; always promisc and allmulti */ 919 1.101 yamaguch ifp->if_flags |= (IFF_PROMISC | IFF_ALLMULTI); 920 1.101 yamaguch return 0; 921 1.1 hannken } 922 1.1 hannken 923 1.101 yamaguch onoff = ifp->if_flags & IFF_ALLMULTI ? true : false; 924 1.101 yamaguch r = vioif_set_allmulti(sc, onoff); 925 1.101 yamaguch if (r != 0) { 926 1.101 yamaguch log(LOG_WARNING, 927 1.101 yamaguch "%s: couldn't %sable ALLMULTI\n", 928 1.101 yamaguch ifp->if_xname, onoff ? 
"en" : "dis"); 929 1.105 yamaguch if (onoff) { 930 1.105 yamaguch CLR(ifp->if_flags, IFF_ALLMULTI); 931 1.105 yamaguch } else { 932 1.105 yamaguch SET(ifp->if_flags, IFF_ALLMULTI); 933 1.101 yamaguch } 934 1.101 yamaguch } 935 1.1 hannken 936 1.101 yamaguch onoff = ifp->if_flags & IFF_PROMISC ? true : false; 937 1.101 yamaguch r = vioif_set_promisc(sc, onoff); 938 1.101 yamaguch if (r != 0) { 939 1.101 yamaguch log(LOG_WARNING, 940 1.101 yamaguch "%s: couldn't %sable PROMISC\n", 941 1.101 yamaguch ifp->if_xname, onoff ? "en" : "dis"); 942 1.105 yamaguch if (onoff) { 943 1.105 yamaguch CLR(ifp->if_flags, IFF_PROMISC); 944 1.105 yamaguch } else { 945 1.105 yamaguch SET(ifp->if_flags, IFF_PROMISC); 946 1.101 yamaguch } 947 1.101 yamaguch } 948 1.55 yamaguch 949 1.101 yamaguch return 0; 950 1.101 yamaguch } 951 1.62 yamaguch 952 1.101 yamaguch static int 953 1.101 yamaguch vioif_ifflags_cb(struct ethercom *ec) 954 1.101 yamaguch { 955 1.101 yamaguch struct ifnet *ifp = &ec->ec_if; 956 1.101 yamaguch struct vioif_softc *sc = ifp->if_softc; 957 1.46 yamaguch 958 1.101 yamaguch return vioif_ifflags(sc); 959 1.101 yamaguch } 960 1.1 hannken 961 1.101 yamaguch static int 962 1.101 yamaguch vioif_setup_sysctl(struct vioif_softc *sc) 963 1.101 yamaguch { 964 1.101 yamaguch const char *devname; 965 1.101 yamaguch struct sysctllog **log; 966 1.101 yamaguch const struct sysctlnode *rnode, *rxnode, *txnode; 967 1.101 yamaguch int error; 968 1.32 jdolecek 969 1.101 yamaguch log = &sc->sc_sysctllog; 970 1.101 yamaguch devname = device_xname(sc->sc_dev); 971 1.32 jdolecek 972 1.101 yamaguch error = sysctl_createv(log, 0, NULL, &rnode, 973 1.101 yamaguch 0, CTLTYPE_NODE, devname, 974 1.101 yamaguch SYSCTL_DESCR("virtio-net information and settings"), 975 1.101 yamaguch NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 976 1.101 yamaguch if (error) 977 1.101 yamaguch goto out; 978 1.7 ozaki 979 1.101 yamaguch error = sysctl_createv(log, 0, &rnode, NULL, 980 1.101 yamaguch 
CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue", 981 1.101 yamaguch SYSCTL_DESCR("Use workqueue for packet processing"), 982 1.101 yamaguch NULL, 0, &sc->sc_txrx_workqueue_sysctl, 0, CTL_CREATE, CTL_EOL); 983 1.101 yamaguch if (error) 984 1.101 yamaguch goto out; 985 1.32 jdolecek 986 1.101 yamaguch error = sysctl_createv(log, 0, &rnode, &rxnode, 987 1.101 yamaguch 0, CTLTYPE_NODE, "rx", 988 1.101 yamaguch SYSCTL_DESCR("virtio-net information and settings for Rx"), 989 1.101 yamaguch NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 990 1.101 yamaguch if (error) 991 1.101 yamaguch goto out; 992 1.1 hannken 993 1.101 yamaguch error = sysctl_createv(log, 0, &rxnode, NULL, 994 1.101 yamaguch CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 995 1.101 yamaguch SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"), 996 1.101 yamaguch NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL); 997 1.101 yamaguch if (error) 998 1.101 yamaguch goto out; 999 1.66 reinoud 1000 1.101 yamaguch error = sysctl_createv(log, 0, &rxnode, NULL, 1001 1.101 yamaguch CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit", 1002 1.101 yamaguch SYSCTL_DESCR("max number of Rx packets to process for deferred processing"), 1003 1.101 yamaguch NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL); 1004 1.101 yamaguch if (error) 1005 1.101 yamaguch goto out; 1006 1.46 yamaguch 1007 1.101 yamaguch error = sysctl_createv(log, 0, &rnode, &txnode, 1008 1.101 yamaguch 0, CTLTYPE_NODE, "tx", 1009 1.101 yamaguch SYSCTL_DESCR("virtio-net information and settings for Tx"), 1010 1.101 yamaguch NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 1011 1.101 yamaguch if (error) 1012 1.101 yamaguch goto out; 1013 1.46 yamaguch 1014 1.101 yamaguch error = sysctl_createv(log, 0, &txnode, NULL, 1015 1.101 yamaguch CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit", 1016 1.101 yamaguch SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"), 1017 1.101 yamaguch NULL, 0, 
&sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL); 1018 1.101 yamaguch if (error) 1019 1.101 yamaguch goto out; 1020 1.46 yamaguch 1021 1.101 yamaguch error = sysctl_createv(log, 0, &txnode, NULL, 1022 1.101 yamaguch CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit", 1023 1.101 yamaguch SYSCTL_DESCR("max number of Tx packets to process for deferred processing"), 1024 1.101 yamaguch NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL); 1025 1.46 yamaguch 1026 1.101 yamaguch out: 1027 1.101 yamaguch if (error) 1028 1.101 yamaguch sysctl_teardown(log); 1029 1.46 yamaguch 1030 1.101 yamaguch return error; 1031 1.101 yamaguch } 1032 1.46 yamaguch 1033 1.101 yamaguch static void 1034 1.101 yamaguch vioif_setup_stats(struct vioif_softc *sc) 1035 1.101 yamaguch { 1036 1.101 yamaguch struct vioif_netqueue *netq; 1037 1.101 yamaguch struct vioif_tx_context *txc; 1038 1.101 yamaguch struct vioif_rx_context *rxc; 1039 1.101 yamaguch size_t i, netq_num; 1040 1.7 ozaki 1041 1.98 yamaguch netq_num = sc->sc_max_nvq_pairs * 2; 1042 1.98 yamaguch for (i = 0; i < netq_num; i++) { 1043 1.101 yamaguch netq = &sc->sc_netqs[i]; 1044 1.101 yamaguch evcnt_attach_dynamic(&netq->netq_mbuf_load_failed, EVCNT_TYPE_MISC, 1045 1.101 yamaguch NULL, netq->netq_evgroup, "failed to load mbuf to DMA"); 1046 1.101 yamaguch evcnt_attach_dynamic(&netq->netq_enqueue_failed, 1047 1.101 yamaguch EVCNT_TYPE_MISC, NULL, netq->netq_evgroup, 1048 1.101 yamaguch "virtqueue enqueue failed failed"); 1049 1.17 ozaki 1050 1.101 yamaguch switch (VIOIF_NETQ_DIR(i)) { 1051 1.101 yamaguch case VIOIF_NETQ_RX: 1052 1.101 yamaguch rxc = netq->netq_ctx; 1053 1.101 yamaguch evcnt_attach_dynamic(&rxc->rxc_mbuf_enobufs, 1054 1.101 yamaguch EVCNT_TYPE_MISC, NULL, netq->netq_evgroup, 1055 1.101 yamaguch "no receive buffer"); 1056 1.101 yamaguch break; 1057 1.101 yamaguch case VIOIF_NETQ_TX: 1058 1.101 yamaguch txc = netq->netq_ctx; 1059 1.101 yamaguch evcnt_attach_dynamic(&txc->txc_defrag_failed, 1060 1.101 yamaguch 
EVCNT_TYPE_MISC, NULL, netq->netq_evgroup, 1061 1.101 yamaguch "m_defrag() failed"); 1062 1.115 ozaki evcnt_attach_dynamic(&txc->txc_pcq_full, 1063 1.115 ozaki EVCNT_TYPE_MISC, NULL, netq->netq_evgroup, 1064 1.115 ozaki "pcq full"); 1065 1.101 yamaguch break; 1066 1.1 hannken } 1067 1.1 hannken } 1068 1.34 ozaki 1069 1.101 yamaguch evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_load_failed, EVCNT_TYPE_MISC, 1070 1.101 yamaguch NULL, device_xname(sc->sc_dev), "control command dmamap load failed"); 1071 1.101 yamaguch evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_failed, EVCNT_TYPE_MISC, 1072 1.101 yamaguch NULL, device_xname(sc->sc_dev), "control command failed"); 1073 1.101 yamaguch } 1074 1.1 hannken 1075 1.101 yamaguch /* 1076 1.101 yamaguch * allocate memory 1077 1.101 yamaguch */ 1078 1.101 yamaguch static int 1079 1.101 yamaguch vioif_dmamap_create(struct vioif_softc *sc, bus_dmamap_t *map, 1080 1.101 yamaguch bus_size_t size, int nsegs, const char *usage) 1081 1.101 yamaguch { 1082 1.101 yamaguch int r; 1083 1.1 hannken 1084 1.101 yamaguch r = bus_dmamap_create(virtio_dmat(sc->sc_virtio), size, 1085 1.101 yamaguch nsegs, size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, map); 1086 1.32 jdolecek 1087 1.101 yamaguch if (r != 0) { 1088 1.101 yamaguch aprint_error_dev(sc->sc_dev, "%s dmamap creation failed, " 1089 1.101 yamaguch "error code %d\n", usage, r); 1090 1.55 yamaguch } 1091 1.55 yamaguch 1092 1.101 yamaguch return r; 1093 1.101 yamaguch } 1094 1.63 yamaguch 1095 1.101 yamaguch static void 1096 1.101 yamaguch vioif_dmamap_destroy(struct vioif_softc *sc, bus_dmamap_t *map) 1097 1.101 yamaguch { 1098 1.1 hannken 1099 1.101 yamaguch if (*map) { 1100 1.101 yamaguch bus_dmamap_destroy(virtio_dmat(sc->sc_virtio), *map); 1101 1.101 yamaguch *map = NULL; 1102 1.101 yamaguch } 1103 1.101 yamaguch } 1104 1.11 ozaki 1105 1.101 yamaguch static int 1106 1.101 yamaguch vioif_dmamap_create_load(struct vioif_softc *sc, bus_dmamap_t *map, 1107 1.101 yamaguch void *buf, bus_size_t 
size, int nsegs, int rw, const char *usage) 1108 1.101 yamaguch { 1109 1.101 yamaguch int r; 1110 1.1 hannken 1111 1.101 yamaguch r = vioif_dmamap_create(sc, map, size, nsegs, usage); 1112 1.101 yamaguch if (r != 0) 1113 1.101 yamaguch return 1; 1114 1.1 hannken 1115 1.101 yamaguch r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), *map, buf, 1116 1.101 yamaguch size, NULL, rw | BUS_DMA_NOWAIT); 1117 1.101 yamaguch if (r != 0) { 1118 1.101 yamaguch vioif_dmamap_destroy(sc, map); 1119 1.101 yamaguch aprint_error_dev(sc->sc_dev, "%s dmamap load failed. " 1120 1.101 yamaguch "error code %d\n", usage, r); 1121 1.43 yamaguch } 1122 1.7 ozaki 1123 1.101 yamaguch return r; 1124 1.1 hannken } 1125 1.1 hannken 1126 1.101 yamaguch static void * 1127 1.101 yamaguch vioif_assign_mem(intptr_t *p, size_t size) 1128 1.55 yamaguch { 1129 1.101 yamaguch intptr_t rv; 1130 1.55 yamaguch 1131 1.101 yamaguch rv = *p; 1132 1.101 yamaguch *p += size; 1133 1.55 yamaguch 1134 1.101 yamaguch return (void *)rv; 1135 1.55 yamaguch } 1136 1.55 yamaguch 1137 1.1 hannken /* 1138 1.101 yamaguch * dma memory is used for: 1139 1.101 yamaguch * netq_maps_kva: metadata array for received frames (READ) and 1140 1.101 yamaguch * sent frames (WRITE) 1141 1.101 yamaguch * ctrlq_cmd: command to be sent via ctrl vq (WRITE) 1142 1.101 yamaguch * ctrlq_status: return value for a command via ctrl vq (READ) 1143 1.101 yamaguch * ctrlq_rx: parameter for a VIRTIO_NET_CTRL_RX class command 1144 1.101 yamaguch * (WRITE) 1145 1.101 yamaguch * ctrlq_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC 1146 1.101 yamaguch * class command (WRITE) 1147 1.101 yamaguch * ctrlq_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC 1148 1.101 yamaguch * class command (WRITE) 1149 1.101 yamaguch * ctrlq_* structures are allocated only one each; they are protected by 1150 1.101 yamaguch * ctrlq_inuse variable and ctrlq_wait condvar. 
1151 1.1 hannken */ 1152 1.1 hannken static int 1153 1.101 yamaguch vioif_alloc_mems(struct vioif_softc *sc) 1154 1.1 hannken { 1155 1.33 ozaki struct virtio_softc *vsc = sc->sc_virtio; 1156 1.98 yamaguch struct vioif_netqueue *netq; 1157 1.43 yamaguch struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; 1158 1.101 yamaguch struct vioif_net_map *maps; 1159 1.101 yamaguch unsigned int vq_num; 1160 1.101 yamaguch int r, rsegs; 1161 1.101 yamaguch bus_size_t dmamemsize; 1162 1.101 yamaguch size_t qid, i, netq_num, kmemsize; 1163 1.101 yamaguch void *vaddr; 1164 1.101 yamaguch intptr_t p; 1165 1.101 yamaguch 1166 1.101 yamaguch netq_num = sc->sc_max_nvq_pairs * 2; 1167 1.1 hannken 1168 1.101 yamaguch /* allocate DMA memory */ 1169 1.101 yamaguch dmamemsize = 0; 1170 1.7 ozaki 1171 1.101 yamaguch for (qid = 0; qid < netq_num; qid++) { 1172 1.101 yamaguch maps = sc->sc_netqs[qid].netq_maps; 1173 1.101 yamaguch vq_num = sc->sc_netqs[qid].netq_vq->vq_num; 1174 1.101 yamaguch dmamemsize += sizeof(*maps[0].vnm_hdr) * vq_num; 1175 1.71 yamaguch } 1176 1.71 yamaguch 1177 1.101 yamaguch if (sc->sc_has_ctrl) { 1178 1.101 yamaguch dmamemsize += sizeof(struct virtio_net_ctrl_cmd); 1179 1.101 yamaguch dmamemsize += sizeof(struct virtio_net_ctrl_status); 1180 1.101 yamaguch dmamemsize += sizeof(struct virtio_net_ctrl_rx); 1181 1.101 yamaguch dmamemsize += sizeof(struct virtio_net_ctrl_mac_tbl) 1182 1.101 yamaguch + ETHER_ADDR_LEN; 1183 1.101 yamaguch dmamemsize += sizeof(struct virtio_net_ctrl_mac_tbl) 1184 1.101 yamaguch + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES; 1185 1.101 yamaguch dmamemsize += sizeof(struct virtio_net_ctrl_mac_addr); 1186 1.101 yamaguch dmamemsize += sizeof(struct virtio_net_ctrl_mq); 1187 1.101 yamaguch } 1188 1.46 yamaguch 1189 1.101 yamaguch r = bus_dmamem_alloc(virtio_dmat(vsc), dmamemsize, 0, 0, 1190 1.101 yamaguch &sc->sc_segs[0], 1, &rsegs, BUS_DMA_NOWAIT); 1191 1.101 yamaguch if (r != 0) { 1192 1.101 yamaguch aprint_error_dev(sc->sc_dev, 1193 1.107 
nakayama "DMA memory allocation failed, size %" PRIuBUSSIZE ", " 1194 1.101 yamaguch "error code %d\n", dmamemsize, r); 1195 1.101 yamaguch goto err_none; 1196 1.101 yamaguch } 1197 1.101 yamaguch r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_segs[0], 1, 1198 1.101 yamaguch dmamemsize, &vaddr, BUS_DMA_NOWAIT); 1199 1.101 yamaguch if (r != 0) { 1200 1.101 yamaguch aprint_error_dev(sc->sc_dev, 1201 1.101 yamaguch "DMA memory map failed, error code %d\n", r); 1202 1.101 yamaguch goto err_dmamem_alloc; 1203 1.46 yamaguch } 1204 1.46 yamaguch 1205 1.101 yamaguch /* assign DMA memory */ 1206 1.101 yamaguch memset(vaddr, 0, dmamemsize); 1207 1.101 yamaguch sc->sc_dmamem = vaddr; 1208 1.101 yamaguch p = (intptr_t) vaddr; 1209 1.47 yamaguch 1210 1.101 yamaguch for (qid = 0; qid < netq_num; qid++) { 1211 1.101 yamaguch netq = &sc->sc_netqs[qid]; 1212 1.101 yamaguch maps = netq->netq_maps; 1213 1.101 yamaguch vq_num = netq->netq_vq->vq_num; 1214 1.46 yamaguch 1215 1.101 yamaguch netq->netq_maps_kva = vioif_assign_mem(&p, 1216 1.101 yamaguch sizeof(*maps[0].vnm_hdr) * vq_num); 1217 1.101 yamaguch } 1218 1.46 yamaguch 1219 1.101 yamaguch if (sc->sc_has_ctrl) { 1220 1.101 yamaguch ctrlq->ctrlq_cmd = vioif_assign_mem(&p, 1221 1.101 yamaguch sizeof(*ctrlq->ctrlq_cmd)); 1222 1.101 yamaguch ctrlq->ctrlq_status = vioif_assign_mem(&p, 1223 1.101 yamaguch sizeof(*ctrlq->ctrlq_status)); 1224 1.101 yamaguch ctrlq->ctrlq_rx = vioif_assign_mem(&p, 1225 1.101 yamaguch sizeof(*ctrlq->ctrlq_rx)); 1226 1.101 yamaguch ctrlq->ctrlq_mac_tbl_uc = vioif_assign_mem(&p, 1227 1.101 yamaguch sizeof(*ctrlq->ctrlq_mac_tbl_uc) 1228 1.101 yamaguch + ETHER_ADDR_LEN); 1229 1.101 yamaguch ctrlq->ctrlq_mac_tbl_mc = vioif_assign_mem(&p, 1230 1.101 yamaguch sizeof(*ctrlq->ctrlq_mac_tbl_mc) 1231 1.101 yamaguch + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES); 1232 1.101 yamaguch ctrlq->ctrlq_mac_addr = vioif_assign_mem(&p, 1233 1.101 yamaguch sizeof(*ctrlq->ctrlq_mac_addr)); 1234 1.101 yamaguch ctrlq->ctrlq_mq = 
vioif_assign_mem(&p, sizeof(*ctrlq->ctrlq_mq)); 1235 1.76 yamaguch } 1236 1.33 ozaki 1237 1.101 yamaguch /* allocate kmem */ 1238 1.101 yamaguch kmemsize = 0; 1239 1.85 yamaguch 1240 1.101 yamaguch for (qid = 0; qid < netq_num; qid++) { 1241 1.101 yamaguch netq = &sc->sc_netqs[qid]; 1242 1.101 yamaguch vq_num = netq->netq_vq->vq_num; 1243 1.55 yamaguch 1244 1.101 yamaguch kmemsize += sizeof(netq->netq_maps[0]) * vq_num; 1245 1.85 yamaguch } 1246 1.85 yamaguch 1247 1.101 yamaguch vaddr = kmem_zalloc(kmemsize, KM_SLEEP); 1248 1.101 yamaguch sc->sc_kmem = vaddr; 1249 1.98 yamaguch 1250 1.101 yamaguch /* assign allocated kmem */ 1251 1.101 yamaguch p = (intptr_t) vaddr; 1252 1.46 yamaguch 1253 1.101 yamaguch for (qid = 0; qid < netq_num; qid++) { 1254 1.101 yamaguch netq = &sc->sc_netqs[qid]; 1255 1.101 yamaguch vq_num = netq->netq_vq->vq_num; 1256 1.85 yamaguch 1257 1.101 yamaguch netq->netq_maps = vioif_assign_mem(&p, 1258 1.101 yamaguch sizeof(netq->netq_maps[0]) * vq_num); 1259 1.85 yamaguch } 1260 1.1 hannken 1261 1.101 yamaguch /* prepare dmamaps */ 1262 1.101 yamaguch for (qid = 0; qid < netq_num; qid++) { 1263 1.101 yamaguch static const struct { 1264 1.101 yamaguch const char *msg_hdr; 1265 1.101 yamaguch const char *msg_payload; 1266 1.101 yamaguch int dma_flag; 1267 1.101 yamaguch bus_size_t dma_size; 1268 1.101 yamaguch int dma_nsegs; 1269 1.101 yamaguch } dmaparams[VIOIF_NETQ_IDX] = { 1270 1.101 yamaguch [VIOIF_NETQ_RX] = { 1271 1.101 yamaguch .msg_hdr = "rx header", 1272 1.101 yamaguch .msg_payload = "rx payload", 1273 1.101 yamaguch .dma_flag = BUS_DMA_READ, 1274 1.101 yamaguch .dma_size = MCLBYTES - ETHER_ALIGN, 1275 1.101 yamaguch .dma_nsegs = 1, 1276 1.101 yamaguch }, 1277 1.101 yamaguch [VIOIF_NETQ_TX] = { 1278 1.101 yamaguch .msg_hdr = "tx header", 1279 1.101 yamaguch .msg_payload = "tx payload", 1280 1.101 yamaguch .dma_flag = BUS_DMA_WRITE, 1281 1.101 yamaguch .dma_size = ETHER_MAX_LEN, 1282 1.101 yamaguch .dma_nsegs = VIRTIO_NET_TX_MAXNSEGS, 1283 
1.101 yamaguch } 1284 1.101 yamaguch }; 1285 1.1 hannken 1286 1.101 yamaguch struct virtio_net_hdr *hdrs; 1287 1.101 yamaguch int dir; 1288 1.111 isaki int nsegs; 1289 1.46 yamaguch 1290 1.101 yamaguch dir = VIOIF_NETQ_DIR(qid); 1291 1.101 yamaguch netq = &sc->sc_netqs[qid]; 1292 1.101 yamaguch vq_num = netq->netq_vq->vq_num; 1293 1.101 yamaguch maps = netq->netq_maps; 1294 1.101 yamaguch hdrs = netq->netq_maps_kva; 1295 1.111 isaki nsegs = uimin(dmaparams[dir].dma_nsegs, vq_num - 1/*hdr*/); 1296 1.7 ozaki 1297 1.101 yamaguch for (i = 0; i < vq_num; i++) { 1298 1.101 yamaguch maps[i].vnm_hdr = &hdrs[i]; 1299 1.114 joe 1300 1.101 yamaguch r = vioif_dmamap_create_load(sc, &maps[i].vnm_hdr_map, 1301 1.101 yamaguch maps[i].vnm_hdr, sc->sc_hdr_size, 1, 1302 1.101 yamaguch dmaparams[dir].dma_flag, dmaparams[dir].msg_hdr); 1303 1.101 yamaguch if (r != 0) 1304 1.101 yamaguch goto err_reqs; 1305 1.98 yamaguch 1306 1.101 yamaguch r = vioif_dmamap_create(sc, &maps[i].vnm_mbuf_map, 1307 1.111 isaki dmaparams[dir].dma_size, nsegs, 1308 1.101 yamaguch dmaparams[dir].msg_payload); 1309 1.101 yamaguch if (r != 0) 1310 1.101 yamaguch goto err_reqs; 1311 1.101 yamaguch } 1312 1.101 yamaguch } 1313 1.7 ozaki 1314 1.101 yamaguch if (sc->sc_has_ctrl) { 1315 1.101 yamaguch /* control vq class & command */ 1316 1.101 yamaguch r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_cmd_dmamap, 1317 1.101 yamaguch ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd), 1, 1318 1.101 yamaguch BUS_DMA_WRITE, "control command"); 1319 1.101 yamaguch if (r != 0) 1320 1.101 yamaguch goto err_reqs; 1321 1.1 hannken 1322 1.101 yamaguch r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_status_dmamap, 1323 1.101 yamaguch ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status), 1, 1324 1.101 yamaguch BUS_DMA_READ, "control status"); 1325 1.101 yamaguch if (r != 0) 1326 1.101 yamaguch goto err_reqs; 1327 1.1 hannken 1328 1.101 yamaguch /* control vq rx mode command parameter */ 1329 1.101 yamaguch r = vioif_dmamap_create_load(sc, 
&ctrlq->ctrlq_rx_dmamap, 1330 1.101 yamaguch ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx), 1, 1331 1.101 yamaguch BUS_DMA_WRITE, "rx mode control command"); 1332 1.101 yamaguch if (r != 0) 1333 1.101 yamaguch goto err_reqs; 1334 1.46 yamaguch 1335 1.101 yamaguch /* multiqueue set command */ 1336 1.101 yamaguch r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_mq_dmamap, 1337 1.101 yamaguch ctrlq->ctrlq_mq, sizeof(*ctrlq->ctrlq_mq), 1, 1338 1.101 yamaguch BUS_DMA_WRITE, "multiqueue set command"); 1339 1.101 yamaguch if (r != 0) 1340 1.101 yamaguch goto err_reqs; 1341 1.36 jdolecek 1342 1.101 yamaguch /* control vq MAC filter table for unicast */ 1343 1.101 yamaguch /* do not load now since its length is variable */ 1344 1.101 yamaguch r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_uc_dmamap, 1345 1.101 yamaguch sizeof(*ctrlq->ctrlq_mac_tbl_uc) 1346 1.101 yamaguch + ETHER_ADDR_LEN, 1, 1347 1.101 yamaguch "unicast MAC address filter command"); 1348 1.101 yamaguch if (r != 0) 1349 1.101 yamaguch goto err_reqs; 1350 1.95 yamaguch 1351 1.101 yamaguch /* control vq MAC filter table for multicast */ 1352 1.101 yamaguch r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_mc_dmamap, 1353 1.101 yamaguch sizeof(*ctrlq->ctrlq_mac_tbl_mc) 1354 1.101 yamaguch + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES, 1, 1355 1.101 yamaguch "multicast MAC address filter command"); 1356 1.101 yamaguch if (r != 0) 1357 1.101 yamaguch goto err_reqs; 1358 1.36 jdolecek 1359 1.101 yamaguch /* control vq MAC address set command */ 1360 1.101 yamaguch r = vioif_dmamap_create_load(sc, 1361 1.101 yamaguch &ctrlq->ctrlq_mac_addr_dmamap, 1362 1.101 yamaguch ctrlq->ctrlq_mac_addr, 1363 1.101 yamaguch sizeof(*ctrlq->ctrlq_mac_addr), 1, 1364 1.101 yamaguch BUS_DMA_WRITE, "mac addr set command"); 1365 1.101 yamaguch if (r != 0) 1366 1.101 yamaguch goto err_reqs; 1367 1.101 yamaguch } 1368 1.36 jdolecek 1369 1.101 yamaguch return 0; 1370 1.36 jdolecek 1371 1.101 yamaguch err_reqs: 1372 1.101 yamaguch 
vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_mc_dmamap); 1373 1.101 yamaguch vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_uc_dmamap); 1374 1.101 yamaguch vioif_dmamap_destroy(sc, &ctrlq->ctrlq_rx_dmamap); 1375 1.101 yamaguch vioif_dmamap_destroy(sc, &ctrlq->ctrlq_status_dmamap); 1376 1.101 yamaguch vioif_dmamap_destroy(sc, &ctrlq->ctrlq_cmd_dmamap); 1377 1.101 yamaguch vioif_dmamap_destroy(sc, &ctrlq->ctrlq_mac_addr_dmamap); 1378 1.101 yamaguch for (qid = 0; qid < netq_num; qid++) { 1379 1.101 yamaguch vq_num = sc->sc_netqs[qid].netq_vq->vq_num; 1380 1.101 yamaguch maps = sc->sc_netqs[qid].netq_maps; 1381 1.99 yamaguch 1382 1.101 yamaguch for (i = 0; i < vq_num; i++) { 1383 1.101 yamaguch vioif_dmamap_destroy(sc, &maps[i].vnm_mbuf_map); 1384 1.101 yamaguch vioif_dmamap_destroy(sc, &maps[i].vnm_hdr_map); 1385 1.1 hannken } 1386 1.1 hannken } 1387 1.101 yamaguch if (sc->sc_kmem) { 1388 1.101 yamaguch kmem_free(sc->sc_kmem, kmemsize); 1389 1.101 yamaguch sc->sc_kmem = NULL; 1390 1.1 hannken } 1391 1.101 yamaguch bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, dmamemsize); 1392 1.101 yamaguch err_dmamem_alloc: 1393 1.101 yamaguch bus_dmamem_free(virtio_dmat(vsc), &sc->sc_segs[0], 1); 1394 1.101 yamaguch err_none: 1395 1.101 yamaguch return -1; 1396 1.46 yamaguch } 1397 1.46 yamaguch 1398 1.46 yamaguch static void 1399 1.101 yamaguch vioif_alloc_queues(struct vioif_softc *sc) 1400 1.46 yamaguch { 1401 1.101 yamaguch int nvq_pairs = sc->sc_max_nvq_pairs; 1402 1.101 yamaguch size_t nvqs, netq_num; 1403 1.101 yamaguch 1404 1.101 yamaguch KASSERT(nvq_pairs <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX); 1405 1.46 yamaguch 1406 1.101 yamaguch nvqs = netq_num = sc->sc_max_nvq_pairs * 2; 1407 1.101 yamaguch if (sc->sc_has_ctrl) 1408 1.101 yamaguch nvqs++; 1409 1.46 yamaguch 1410 1.101 yamaguch sc->sc_vqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * nvqs, KM_SLEEP); 1411 1.106 yamaguch sc->sc_netqs = kmem_zalloc(sizeof(sc->sc_netqs[0]) * netq_num, 1412 1.101 yamaguch KM_SLEEP); 1413 1.46 yamaguch 
} 1414 1.46 yamaguch 1415 1.46 yamaguch static void 1416 1.101 yamaguch vioif_free_queues(struct vioif_softc *sc) 1417 1.46 yamaguch { 1418 1.101 yamaguch size_t nvqs, netq_num; 1419 1.46 yamaguch 1420 1.101 yamaguch nvqs = netq_num = sc->sc_max_nvq_pairs * 2; 1421 1.101 yamaguch if (sc->sc_ctrlq.ctrlq_vq) 1422 1.101 yamaguch nvqs++; 1423 1.46 yamaguch 1424 1.101 yamaguch kmem_free(sc->sc_netqs, sizeof(sc->sc_netqs[0]) * netq_num); 1425 1.101 yamaguch kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs); 1426 1.101 yamaguch sc->sc_netqs = NULL; 1427 1.101 yamaguch sc->sc_vqs = NULL; 1428 1.46 yamaguch } 1429 1.46 yamaguch 1430 1.101 yamaguch /* 1431 1.101 yamaguch * Network queues 1432 1.101 yamaguch */ 1433 1.101 yamaguch static int 1434 1.101 yamaguch vioif_netqueue_init(struct vioif_softc *sc, struct virtio_softc *vsc, 1435 1.101 yamaguch size_t qid, u_int softint_flags) 1436 1.46 yamaguch { 1437 1.101 yamaguch static const struct { 1438 1.101 yamaguch const char *dirname; 1439 1.101 yamaguch int segsize; 1440 1.101 yamaguch int nsegs; 1441 1.101 yamaguch int (*intrhand)(void *); 1442 1.101 yamaguch void (*sihand)(void *); 1443 1.101 yamaguch } params[VIOIF_NETQ_IDX] = { 1444 1.101 yamaguch [VIOIF_NETQ_RX] = { 1445 1.101 yamaguch .dirname = "rx", 1446 1.101 yamaguch .segsize = MCLBYTES, 1447 1.101 yamaguch .nsegs = 2, 1448 1.101 yamaguch .intrhand = vioif_rx_intr, 1449 1.101 yamaguch .sihand = vioif_rx_handle, 1450 1.101 yamaguch }, 1451 1.101 yamaguch [VIOIF_NETQ_TX] = { 1452 1.101 yamaguch .dirname = "tx", 1453 1.101 yamaguch .segsize = ETHER_MAX_LEN - ETHER_HDR_LEN, 1454 1.101 yamaguch .nsegs = 2, 1455 1.101 yamaguch .intrhand = vioif_tx_intr, 1456 1.101 yamaguch .sihand = vioif_tx_handle, 1457 1.101 yamaguch } 1458 1.101 yamaguch }; 1459 1.46 yamaguch 1460 1.101 yamaguch struct virtqueue *vq; 1461 1.101 yamaguch struct vioif_netqueue *netq; 1462 1.101 yamaguch struct vioif_tx_context *txc; 1463 1.101 yamaguch struct vioif_rx_context *rxc; 1464 1.101 yamaguch 
char qname[32]; 1465 1.101 yamaguch int r, dir; 1466 1.46 yamaguch 1467 1.101 yamaguch txc = NULL; 1468 1.101 yamaguch rxc = NULL; 1469 1.101 yamaguch netq = &sc->sc_netqs[qid]; 1470 1.101 yamaguch vq = &sc->sc_vqs[qid]; 1471 1.101 yamaguch dir = VIOIF_NETQ_DIR(qid); 1472 1.46 yamaguch 1473 1.101 yamaguch netq->netq_vq = &sc->sc_vqs[qid]; 1474 1.101 yamaguch netq->netq_stopping = false; 1475 1.101 yamaguch netq->netq_running_handle = false; 1476 1.46 yamaguch 1477 1.101 yamaguch snprintf(qname, sizeof(qname), "%s%zu", 1478 1.101 yamaguch params[dir].dirname, VIOIF_NETQ_PAIRIDX(qid)); 1479 1.101 yamaguch snprintf(netq->netq_evgroup, sizeof(netq->netq_evgroup), 1480 1.101 yamaguch "%s-%s", device_xname(sc->sc_dev), qname); 1481 1.46 yamaguch 1482 1.101 yamaguch mutex_init(&netq->netq_lock, MUTEX_DEFAULT, IPL_NET); 1483 1.104 yamaguch virtio_init_vq(vsc, vq, qid, params[dir].intrhand, netq); 1484 1.104 yamaguch 1485 1.104 yamaguch r = virtio_alloc_vq(vsc, vq, 1486 1.101 yamaguch params[dir].segsize + sc->sc_hdr_size, 1487 1.101 yamaguch params[dir].nsegs, qname); 1488 1.101 yamaguch if (r != 0) 1489 1.101 yamaguch goto err; 1490 1.101 yamaguch netq->netq_vq = vq; 1491 1.46 yamaguch 1492 1.101 yamaguch netq->netq_softint = softint_establish(softint_flags, 1493 1.101 yamaguch params[dir].sihand, netq); 1494 1.101 yamaguch if (netq->netq_softint == NULL) { 1495 1.101 yamaguch aprint_error_dev(sc->sc_dev, 1496 1.101 yamaguch "couldn't establish %s softint\n", 1497 1.101 yamaguch params[dir].dirname); 1498 1.101 yamaguch goto err; 1499 1.46 yamaguch } 1500 1.101 yamaguch vioif_work_set(&netq->netq_work, params[dir].sihand, netq); 1501 1.46 yamaguch 1502 1.101 yamaguch switch (dir) { 1503 1.101 yamaguch case VIOIF_NETQ_RX: 1504 1.101 yamaguch rxc = kmem_zalloc(sizeof(*rxc), KM_SLEEP); 1505 1.101 yamaguch netq->netq_ctx = rxc; 1506 1.101 yamaguch /* nothing to do */ 1507 1.101 yamaguch break; 1508 1.101 yamaguch case VIOIF_NETQ_TX: 1509 1.101 yamaguch txc = 
kmem_zalloc(sizeof(*txc), KM_SLEEP); 1510 1.101 yamaguch netq->netq_ctx = (void *)txc; 1511 1.101 yamaguch txc->txc_deferred_transmit = softint_establish(softint_flags, 1512 1.101 yamaguch vioif_deferred_transmit, netq); 1513 1.101 yamaguch if (txc->txc_deferred_transmit == NULL) { 1514 1.101 yamaguch aprint_error_dev(sc->sc_dev, 1515 1.101 yamaguch "couldn't establish softint for " 1516 1.101 yamaguch "tx deferred transmit\n"); 1517 1.101 yamaguch goto err; 1518 1.101 yamaguch } 1519 1.101 yamaguch txc->txc_link_active = VIOIF_IS_LINK_ACTIVE(sc); 1520 1.102 yamaguch txc->txc_no_free_slots = false; 1521 1.101 yamaguch txc->txc_intrq = pcq_create(vq->vq_num, KM_SLEEP); 1522 1.101 yamaguch break; 1523 1.46 yamaguch } 1524 1.46 yamaguch 1525 1.46 yamaguch return 0; 1526 1.46 yamaguch 1527 1.101 yamaguch err: 1528 1.101 yamaguch netq->netq_ctx = NULL; 1529 1.7 ozaki 1530 1.101 yamaguch if (rxc != NULL) { 1531 1.101 yamaguch kmem_free(rxc, sizeof(*rxc)); 1532 1.101 yamaguch } 1533 1.1 hannken 1534 1.101 yamaguch if (txc != NULL) { 1535 1.101 yamaguch if (txc->txc_deferred_transmit != NULL) 1536 1.101 yamaguch softint_disestablish(txc->txc_deferred_transmit); 1537 1.101 yamaguch if (txc->txc_intrq != NULL) 1538 1.101 yamaguch pcq_destroy(txc->txc_intrq); 1539 1.114 joe kmem_free(txc, sizeof(*txc)); 1540 1.101 yamaguch } 1541 1.1 hannken 1542 1.101 yamaguch vioif_work_set(&netq->netq_work, NULL, NULL); 1543 1.101 yamaguch if (netq->netq_softint != NULL) { 1544 1.101 yamaguch softint_disestablish(netq->netq_softint); 1545 1.101 yamaguch netq->netq_softint = NULL; 1546 1.1 hannken } 1547 1.1 hannken 1548 1.101 yamaguch virtio_free_vq(vsc, vq); 1549 1.101 yamaguch mutex_destroy(&netq->netq_lock); 1550 1.101 yamaguch netq->netq_vq = NULL; 1551 1.1 hannken 1552 1.101 yamaguch return -1; 1553 1.1 hannken } 1554 1.1 hannken 1555 1.101 yamaguch static void 1556 1.101 yamaguch vioif_netqueue_teardown(struct vioif_softc *sc, struct virtio_softc *vsc, 1557 1.101 yamaguch size_t qid) 
1558 1.1 hannken { 1559 1.98 yamaguch struct vioif_netqueue *netq; 1560 1.101 yamaguch struct vioif_rx_context *rxc; 1561 1.101 yamaguch struct vioif_tx_context *txc; 1562 1.101 yamaguch int dir; 1563 1.101 yamaguch 1564 1.101 yamaguch netq = &sc->sc_netqs[qid]; 1565 1.101 yamaguch 1566 1.101 yamaguch if (netq->netq_vq == NULL) 1567 1.101 yamaguch return; 1568 1.101 yamaguch 1569 1.101 yamaguch netq = &sc->sc_netqs[qid]; 1570 1.101 yamaguch dir = VIOIF_NETQ_DIR(qid); 1571 1.101 yamaguch switch (dir) { 1572 1.101 yamaguch case VIOIF_NETQ_RX: 1573 1.101 yamaguch rxc = netq->netq_ctx; 1574 1.101 yamaguch netq->netq_ctx = NULL; 1575 1.101 yamaguch kmem_free(rxc, sizeof(*rxc)); 1576 1.101 yamaguch break; 1577 1.101 yamaguch case VIOIF_NETQ_TX: 1578 1.101 yamaguch txc = netq->netq_ctx; 1579 1.101 yamaguch netq->netq_ctx = NULL; 1580 1.101 yamaguch softint_disestablish(txc->txc_deferred_transmit); 1581 1.101 yamaguch pcq_destroy(txc->txc_intrq); 1582 1.101 yamaguch kmem_free(txc, sizeof(*txc)); 1583 1.101 yamaguch break; 1584 1.101 yamaguch } 1585 1.1 hannken 1586 1.101 yamaguch softint_disestablish(netq->netq_softint); 1587 1.101 yamaguch virtio_free_vq(vsc, netq->netq_vq); 1588 1.101 yamaguch mutex_destroy(&netq->netq_lock); 1589 1.101 yamaguch netq->netq_vq = NULL; 1590 1.1 hannken } 1591 1.1 hannken 1592 1.98 yamaguch static void 1593 1.98 yamaguch vioif_net_sched_handle(struct vioif_softc *sc, struct vioif_netqueue *netq) 1594 1.98 yamaguch { 1595 1.98 yamaguch 1596 1.98 yamaguch KASSERT(mutex_owned(&netq->netq_lock)); 1597 1.98 yamaguch KASSERT(!netq->netq_stopping); 1598 1.98 yamaguch 1599 1.98 yamaguch if (netq->netq_workqueue) { 1600 1.98 yamaguch vioif_work_add(sc->sc_txrx_workqueue, &netq->netq_work); 1601 1.98 yamaguch } else { 1602 1.98 yamaguch softint_schedule(netq->netq_softint); 1603 1.98 yamaguch } 1604 1.98 yamaguch } 1605 1.98 yamaguch 1606 1.99 yamaguch static int 1607 1.99 yamaguch vioif_net_load_mbuf(struct virtio_softc *vsc, struct vioif_net_map 
*map, 1608 1.99 yamaguch struct mbuf *m, int dma_flags) 1609 1.99 yamaguch { 1610 1.99 yamaguch int r; 1611 1.99 yamaguch 1612 1.99 yamaguch KASSERT(map->vnm_mbuf == NULL); 1613 1.99 yamaguch 1614 1.99 yamaguch r = bus_dmamap_load_mbuf(virtio_dmat(vsc), 1615 1.99 yamaguch map->vnm_mbuf_map, m, dma_flags | BUS_DMA_NOWAIT); 1616 1.99 yamaguch if (r == 0) { 1617 1.99 yamaguch map->vnm_mbuf = m; 1618 1.99 yamaguch } 1619 1.99 yamaguch 1620 1.99 yamaguch return r; 1621 1.99 yamaguch } 1622 1.99 yamaguch 1623 1.99 yamaguch static void 1624 1.99 yamaguch vioif_net_unload_mbuf(struct virtio_softc *vsc, struct vioif_net_map *map) 1625 1.99 yamaguch { 1626 1.99 yamaguch 1627 1.99 yamaguch KASSERT(map->vnm_mbuf != NULL); 1628 1.99 yamaguch bus_dmamap_unload(virtio_dmat(vsc), map->vnm_mbuf_map); 1629 1.99 yamaguch map->vnm_mbuf = NULL; 1630 1.99 yamaguch } 1631 1.99 yamaguch 1632 1.99 yamaguch static int 1633 1.99 yamaguch vioif_net_enqueue(struct virtio_softc *vsc, struct virtqueue *vq, 1634 1.99 yamaguch int slot, struct vioif_net_map *map, int dma_ops, bool is_write) 1635 1.99 yamaguch { 1636 1.99 yamaguch int r; 1637 1.99 yamaguch 1638 1.99 yamaguch KASSERT(map->vnm_mbuf != NULL); 1639 1.99 yamaguch 1640 1.99 yamaguch /* This should actually never fail */ 1641 1.99 yamaguch r = virtio_enqueue_reserve(vsc, vq, slot, 1642 1.99 yamaguch map->vnm_mbuf_map->dm_nsegs + 1); 1643 1.99 yamaguch if (r != 0) { 1644 1.99 yamaguch /* slot already freed by virtio_enqueue_reserve */ 1645 1.99 yamaguch return r; 1646 1.99 yamaguch } 1647 1.99 yamaguch 1648 1.99 yamaguch bus_dmamap_sync(virtio_dmat(vsc), map->vnm_mbuf_map, 1649 1.99 yamaguch 0, map->vnm_mbuf_map->dm_mapsize, dma_ops); 1650 1.99 yamaguch bus_dmamap_sync(virtio_dmat(vsc), map->vnm_hdr_map, 1651 1.99 yamaguch 0, map->vnm_hdr_map->dm_mapsize, dma_ops); 1652 1.99 yamaguch 1653 1.99 yamaguch virtio_enqueue(vsc, vq, slot, map->vnm_hdr_map, is_write); 1654 1.99 yamaguch virtio_enqueue(vsc, vq, slot, map->vnm_mbuf_map, is_write); 
1655 1.99 yamaguch virtio_enqueue_commit(vsc, vq, slot, false); 1656 1.99 yamaguch 1657 1.99 yamaguch return 0; 1658 1.99 yamaguch } 1659 1.99 yamaguch 1660 1.99 yamaguch static int 1661 1.99 yamaguch vioif_net_enqueue_tx(struct virtio_softc *vsc, struct virtqueue *vq, 1662 1.99 yamaguch int slot, struct vioif_net_map *map) 1663 1.99 yamaguch { 1664 1.99 yamaguch 1665 1.99 yamaguch return vioif_net_enqueue(vsc, vq, slot, map, 1666 1.99 yamaguch BUS_DMASYNC_PREWRITE, true); 1667 1.99 yamaguch } 1668 1.99 yamaguch 1669 1.99 yamaguch static int 1670 1.99 yamaguch vioif_net_enqueue_rx(struct virtio_softc *vsc, struct virtqueue *vq, 1671 1.99 yamaguch int slot, struct vioif_net_map *map) 1672 1.99 yamaguch { 1673 1.99 yamaguch 1674 1.99 yamaguch return vioif_net_enqueue(vsc, vq, slot, map, 1675 1.99 yamaguch BUS_DMASYNC_PREREAD, false); 1676 1.99 yamaguch } 1677 1.99 yamaguch 1678 1.101 yamaguch static struct mbuf * 1679 1.101 yamaguch vioif_net_dequeue_commit(struct virtio_softc *vsc, struct virtqueue *vq, 1680 1.101 yamaguch int slot, struct vioif_net_map *map, int dma_flags) 1681 1.101 yamaguch { 1682 1.101 yamaguch struct mbuf *m; 1683 1.101 yamaguch 1684 1.101 yamaguch m = map->vnm_mbuf; 1685 1.101 yamaguch KASSERT(m != NULL); 1686 1.101 yamaguch map->vnm_mbuf = NULL; 1687 1.101 yamaguch 1688 1.101 yamaguch bus_dmamap_sync(virtio_dmat(vsc), map->vnm_hdr_map, 1689 1.101 yamaguch 0, map->vnm_hdr_map->dm_mapsize, dma_flags); 1690 1.101 yamaguch bus_dmamap_sync(virtio_dmat(vsc), map->vnm_mbuf_map, 1691 1.101 yamaguch 0, map->vnm_mbuf_map->dm_mapsize, dma_flags); 1692 1.101 yamaguch 1693 1.101 yamaguch bus_dmamap_unload(virtio_dmat(vsc), map->vnm_mbuf_map); 1694 1.101 yamaguch virtio_dequeue_commit(vsc, vq, slot); 1695 1.101 yamaguch 1696 1.101 yamaguch return m; 1697 1.101 yamaguch } 1698 1.101 yamaguch 1699 1.101 yamaguch static void 1700 1.101 yamaguch vioif_net_intr_enable(struct vioif_softc *sc, struct virtio_softc *vsc) 1701 1.101 yamaguch { 1702 1.101 yamaguch 
struct vioif_netqueue *netq; 1703 1.101 yamaguch size_t i, act_qnum; 1704 1.101 yamaguch int enqueued; 1705 1.101 yamaguch 1706 1.101 yamaguch act_qnum = sc->sc_act_nvq_pairs * 2; 1707 1.101 yamaguch for (i = 0; i < act_qnum; i++) { 1708 1.101 yamaguch netq = &sc->sc_netqs[i]; 1709 1.101 yamaguch 1710 1.101 yamaguch KASSERT(!netq->netq_stopping); 1711 1.101 yamaguch KASSERT(!netq->netq_running_handle); 1712 1.101 yamaguch 1713 1.101 yamaguch enqueued = virtio_start_vq_intr(vsc, netq->netq_vq); 1714 1.101 yamaguch if (enqueued != 0) { 1715 1.101 yamaguch virtio_stop_vq_intr(vsc, netq->netq_vq); 1716 1.101 yamaguch 1717 1.101 yamaguch mutex_enter(&netq->netq_lock); 1718 1.101 yamaguch netq->netq_running_handle = true; 1719 1.101 yamaguch vioif_net_sched_handle(sc, netq); 1720 1.101 yamaguch mutex_exit(&netq->netq_lock); 1721 1.101 yamaguch } 1722 1.101 yamaguch } 1723 1.101 yamaguch } 1724 1.101 yamaguch 1725 1.101 yamaguch static void 1726 1.101 yamaguch vioif_net_intr_disable(struct vioif_softc *sc, struct virtio_softc *vsc) 1727 1.101 yamaguch { 1728 1.101 yamaguch struct vioif_netqueue *netq; 1729 1.101 yamaguch size_t i, act_qnum; 1730 1.101 yamaguch 1731 1.101 yamaguch act_qnum = sc->sc_act_nvq_pairs * 2; 1732 1.101 yamaguch for (i = 0; i < act_qnum; i++) { 1733 1.101 yamaguch netq = &sc->sc_netqs[i]; 1734 1.101 yamaguch 1735 1.101 yamaguch virtio_stop_vq_intr(vsc, netq->netq_vq); 1736 1.101 yamaguch } 1737 1.101 yamaguch } 1738 1.101 yamaguch 1739 1.101 yamaguch /* 1740 1.101 yamaguch * Receive implementation 1741 1.101 yamaguch */ 1742 1.101 yamaguch /* enqueue mbufs to receive slots */ 1743 1.101 yamaguch static void 1744 1.101 yamaguch vioif_populate_rx_mbufs_locked(struct vioif_softc *sc, struct vioif_netqueue *netq) 1745 1.101 yamaguch { 1746 1.101 yamaguch struct virtqueue *vq = netq->netq_vq; 1747 1.101 yamaguch struct virtio_softc *vsc = vq->vq_owner; 1748 1.101 yamaguch struct vioif_rx_context *rxc; 1749 1.101 yamaguch struct vioif_net_map *map; 1750 
1.101 yamaguch struct mbuf *m; 1751 1.101 yamaguch int i, r, ndone = 0; 1752 1.101 yamaguch 1753 1.101 yamaguch KASSERT(mutex_owned(&netq->netq_lock)); 1754 1.101 yamaguch 1755 1.101 yamaguch rxc = netq->netq_ctx; 1756 1.101 yamaguch 1757 1.101 yamaguch for (i = 0; i < vq->vq_num; i++) { 1758 1.101 yamaguch int slot; 1759 1.101 yamaguch r = virtio_enqueue_prep(vsc, vq, &slot); 1760 1.101 yamaguch if (r == EAGAIN) 1761 1.101 yamaguch break; 1762 1.101 yamaguch if (__predict_false(r != 0)) 1763 1.101 yamaguch panic("enqueue_prep for rx buffers"); 1764 1.101 yamaguch 1765 1.101 yamaguch MGETHDR(m, M_DONTWAIT, MT_DATA); 1766 1.101 yamaguch if (m == NULL) { 1767 1.101 yamaguch virtio_enqueue_abort(vsc, vq, slot); 1768 1.101 yamaguch rxc->rxc_mbuf_enobufs.ev_count++; 1769 1.101 yamaguch break; 1770 1.101 yamaguch } 1771 1.113 mlelstv MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 1772 1.101 yamaguch MCLGET(m, M_DONTWAIT); 1773 1.101 yamaguch if ((m->m_flags & M_EXT) == 0) { 1774 1.101 yamaguch virtio_enqueue_abort(vsc, vq, slot); 1775 1.101 yamaguch m_freem(m); 1776 1.101 yamaguch rxc->rxc_mbuf_enobufs.ev_count++; 1777 1.101 yamaguch break; 1778 1.101 yamaguch } 1779 1.101 yamaguch 1780 1.101 yamaguch m->m_len = m->m_pkthdr.len = MCLBYTES; 1781 1.101 yamaguch m_adj(m, ETHER_ALIGN); 1782 1.101 yamaguch 1783 1.101 yamaguch map = &netq->netq_maps[slot]; 1784 1.101 yamaguch r = vioif_net_load_mbuf(vsc, map, m, BUS_DMA_READ); 1785 1.101 yamaguch if (r != 0) { 1786 1.101 yamaguch virtio_enqueue_abort(vsc, vq, slot); 1787 1.101 yamaguch m_freem(m); 1788 1.101 yamaguch netq->netq_mbuf_load_failed.ev_count++; 1789 1.101 yamaguch break; 1790 1.101 yamaguch } 1791 1.101 yamaguch 1792 1.101 yamaguch r = vioif_net_enqueue_rx(vsc, vq, slot, map); 1793 1.101 yamaguch if (r != 0) { 1794 1.101 yamaguch vioif_net_unload_mbuf(vsc, map); 1795 1.101 yamaguch netq->netq_enqueue_failed.ev_count++; 1796 1.101 yamaguch m_freem(m); 1797 1.101 yamaguch /* slot already freed by vioif_net_enqueue_rx */ 
1798 1.101 yamaguch break; 1799 1.101 yamaguch } 1800 1.101 yamaguch 1801 1.101 yamaguch ndone++; 1802 1.101 yamaguch } 1803 1.101 yamaguch 1804 1.101 yamaguch if (ndone > 0) 1805 1.101 yamaguch vioif_notify(vsc, vq); 1806 1.101 yamaguch } 1807 1.101 yamaguch 1808 1.101 yamaguch /* dequeue received packets */ 1809 1.101 yamaguch static bool 1810 1.101 yamaguch vioif_rx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc, 1811 1.101 yamaguch struct vioif_netqueue *netq, u_int limit, size_t *ndeqp) 1812 1.101 yamaguch { 1813 1.101 yamaguch struct virtqueue *vq = netq->netq_vq; 1814 1.101 yamaguch struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1815 1.101 yamaguch struct vioif_net_map *map; 1816 1.101 yamaguch struct mbuf *m; 1817 1.101 yamaguch int slot, len; 1818 1.101 yamaguch bool more; 1819 1.101 yamaguch size_t ndeq; 1820 1.101 yamaguch 1821 1.101 yamaguch KASSERT(mutex_owned(&netq->netq_lock)); 1822 1.101 yamaguch 1823 1.101 yamaguch more = false; 1824 1.101 yamaguch ndeq = 0; 1825 1.101 yamaguch 1826 1.101 yamaguch if (virtio_vq_is_enqueued(vsc, vq) == false) 1827 1.101 yamaguch goto done; 1828 1.101 yamaguch 1829 1.101 yamaguch for (;;ndeq++) { 1830 1.101 yamaguch if (ndeq >= limit) { 1831 1.101 yamaguch more = true; 1832 1.101 yamaguch break; 1833 1.101 yamaguch } 1834 1.101 yamaguch 1835 1.101 yamaguch if (virtio_dequeue(vsc, vq, &slot, &len) != 0) 1836 1.101 yamaguch break; 1837 1.101 yamaguch 1838 1.101 yamaguch map = &netq->netq_maps[slot]; 1839 1.101 yamaguch KASSERT(map->vnm_mbuf != NULL); 1840 1.101 yamaguch m = vioif_net_dequeue_commit(vsc, vq, slot, 1841 1.101 yamaguch map, BUS_DMASYNC_POSTREAD); 1842 1.101 yamaguch KASSERT(m != NULL); 1843 1.101 yamaguch 1844 1.101 yamaguch m->m_len = m->m_pkthdr.len = len - sc->sc_hdr_size; 1845 1.101 yamaguch m_set_rcvif(m, ifp); 1846 1.101 yamaguch if_percpuq_enqueue(ifp->if_percpuq, m); 1847 1.101 yamaguch } 1848 1.101 yamaguch 1849 1.101 yamaguch done: 1850 1.101 yamaguch if (ndeqp != NULL) 1851 1.101 
yamaguch *ndeqp = ndeq; 1852 1.101 yamaguch 1853 1.101 yamaguch return more; 1854 1.101 yamaguch } 1855 1.101 yamaguch 1856 1.101 yamaguch static void 1857 1.101 yamaguch vioif_rx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc, 1858 1.101 yamaguch struct vioif_netqueue *netq) 1859 1.101 yamaguch { 1860 1.101 yamaguch struct vioif_net_map *map; 1861 1.101 yamaguch struct mbuf *m; 1862 1.101 yamaguch unsigned int i, vq_num; 1863 1.101 yamaguch bool more; 1864 1.101 yamaguch 1865 1.101 yamaguch mutex_enter(&netq->netq_lock); 1866 1.101 yamaguch 1867 1.101 yamaguch vq_num = netq->netq_vq->vq_num; 1868 1.101 yamaguch for (;;) { 1869 1.101 yamaguch more = vioif_rx_deq_locked(sc, vsc, netq, vq_num, NULL); 1870 1.101 yamaguch if (more == false) 1871 1.101 yamaguch break; 1872 1.101 yamaguch } 1873 1.101 yamaguch 1874 1.101 yamaguch for (i = 0; i < vq_num; i++) { 1875 1.101 yamaguch map = &netq->netq_maps[i]; 1876 1.101 yamaguch 1877 1.101 yamaguch m = map->vnm_mbuf; 1878 1.101 yamaguch if (m == NULL) 1879 1.101 yamaguch continue; 1880 1.101 yamaguch 1881 1.101 yamaguch vioif_net_unload_mbuf(vsc, map); 1882 1.101 yamaguch m_freem(m); 1883 1.101 yamaguch } 1884 1.101 yamaguch mutex_exit(&netq->netq_lock); 1885 1.101 yamaguch } 1886 1.101 yamaguch 1887 1.101 yamaguch static void 1888 1.101 yamaguch vioif_rx_handle_locked(void *xnetq, u_int limit) 1889 1.101 yamaguch { 1890 1.101 yamaguch struct vioif_netqueue *netq = xnetq; 1891 1.101 yamaguch struct virtqueue *vq = netq->netq_vq; 1892 1.101 yamaguch struct virtio_softc *vsc = vq->vq_owner; 1893 1.101 yamaguch struct vioif_softc *sc = device_private(virtio_child(vsc)); 1894 1.101 yamaguch bool more; 1895 1.101 yamaguch int enqueued; 1896 1.101 yamaguch size_t ndeq; 1897 1.101 yamaguch 1898 1.101 yamaguch KASSERT(mutex_owned(&netq->netq_lock)); 1899 1.101 yamaguch KASSERT(!netq->netq_stopping); 1900 1.101 yamaguch 1901 1.101 yamaguch more = vioif_rx_deq_locked(sc, vsc, netq, limit, &ndeq); 1902 1.101 yamaguch if 
(ndeq > 0) 1903 1.101 yamaguch vioif_populate_rx_mbufs_locked(sc, netq); 1904 1.101 yamaguch 1905 1.101 yamaguch if (more) { 1906 1.101 yamaguch vioif_net_sched_handle(sc, netq); 1907 1.101 yamaguch return; 1908 1.101 yamaguch } 1909 1.101 yamaguch 1910 1.101 yamaguch enqueued = virtio_start_vq_intr(vsc, netq->netq_vq); 1911 1.101 yamaguch if (enqueued != 0) { 1912 1.101 yamaguch virtio_stop_vq_intr(vsc, netq->netq_vq); 1913 1.101 yamaguch vioif_net_sched_handle(sc, netq); 1914 1.101 yamaguch return; 1915 1.101 yamaguch } 1916 1.101 yamaguch 1917 1.101 yamaguch netq->netq_running_handle = false; 1918 1.101 yamaguch } 1919 1.101 yamaguch 1920 1.101 yamaguch static int 1921 1.101 yamaguch vioif_rx_intr(void *arg) 1922 1.99 yamaguch { 1923 1.101 yamaguch struct vioif_netqueue *netq = arg; 1924 1.101 yamaguch struct virtqueue *vq = netq->netq_vq; 1925 1.101 yamaguch struct virtio_softc *vsc = vq->vq_owner; 1926 1.101 yamaguch struct vioif_softc *sc = device_private(virtio_child(vsc)); 1927 1.101 yamaguch u_int limit; 1928 1.99 yamaguch 1929 1.101 yamaguch mutex_enter(&netq->netq_lock); 1930 1.99 yamaguch 1931 1.101 yamaguch /* handler is already running in softint/workqueue */ 1932 1.101 yamaguch if (netq->netq_running_handle) 1933 1.101 yamaguch goto done; 1934 1.99 yamaguch 1935 1.108 yamaguch if (netq->netq_stopping) 1936 1.108 yamaguch goto done; 1937 1.108 yamaguch 1938 1.101 yamaguch netq->netq_running_handle = true; 1939 1.99 yamaguch 1940 1.101 yamaguch limit = sc->sc_rx_intr_process_limit; 1941 1.101 yamaguch virtio_stop_vq_intr(vsc, vq); 1942 1.101 yamaguch vioif_rx_handle_locked(netq, limit); 1943 1.99 yamaguch 1944 1.101 yamaguch done: 1945 1.101 yamaguch mutex_exit(&netq->netq_lock); 1946 1.101 yamaguch return 1; 1947 1.99 yamaguch } 1948 1.99 yamaguch 1949 1.99 yamaguch static void 1950 1.101 yamaguch vioif_rx_handle(void *xnetq) 1951 1.99 yamaguch { 1952 1.101 yamaguch struct vioif_netqueue *netq = xnetq; 1953 1.101 yamaguch struct virtqueue *vq = 
netq->netq_vq; 1954 1.101 yamaguch struct virtio_softc *vsc = vq->vq_owner; 1955 1.101 yamaguch struct vioif_softc *sc = device_private(virtio_child(vsc)); 1956 1.101 yamaguch u_int limit; 1957 1.99 yamaguch 1958 1.101 yamaguch mutex_enter(&netq->netq_lock); 1959 1.99 yamaguch 1960 1.101 yamaguch KASSERT(netq->netq_running_handle); 1961 1.99 yamaguch 1962 1.101 yamaguch if (netq->netq_stopping) { 1963 1.101 yamaguch netq->netq_running_handle = false; 1964 1.101 yamaguch goto done; 1965 1.99 yamaguch } 1966 1.99 yamaguch 1967 1.101 yamaguch limit = sc->sc_rx_process_limit; 1968 1.101 yamaguch vioif_rx_handle_locked(netq, limit); 1969 1.99 yamaguch 1970 1.101 yamaguch done: 1971 1.101 yamaguch mutex_exit(&netq->netq_lock); 1972 1.99 yamaguch } 1973 1.99 yamaguch 1974 1.1 hannken /* 1975 1.110 andvar * Transmission implementation 1976 1.1 hannken */ 1977 1.101 yamaguch /* enqueue mbufs to send */ 1978 1.1 hannken static void 1979 1.101 yamaguch vioif_send_common_locked(struct ifnet *ifp, struct vioif_netqueue *netq, 1980 1.101 yamaguch bool is_transmit) 1981 1.12 ozaki { 1982 1.101 yamaguch struct vioif_softc *sc = ifp->if_softc; 1983 1.101 yamaguch struct virtio_softc *vsc = sc->sc_virtio; 1984 1.98 yamaguch struct virtqueue *vq = netq->netq_vq; 1985 1.101 yamaguch struct vioif_tx_context *txc; 1986 1.95 yamaguch struct vioif_net_map *map; 1987 1.89 yamaguch struct mbuf *m; 1988 1.101 yamaguch int queued = 0; 1989 1.1 hannken 1990 1.98 yamaguch KASSERT(mutex_owned(&netq->netq_lock)); 1991 1.98 yamaguch 1992 1.101 yamaguch if (netq->netq_stopping || 1993 1.101 yamaguch !ISSET(ifp->if_flags, IFF_RUNNING)) 1994 1.101 yamaguch return; 1995 1.101 yamaguch 1996 1.101 yamaguch txc = netq->netq_ctx; 1997 1.101 yamaguch 1998 1.102 yamaguch if (!txc->txc_link_active || 1999 1.102 yamaguch txc->txc_no_free_slots) 2000 1.101 yamaguch return; 2001 1.7 ozaki 2002 1.101 yamaguch for (;;) { 2003 1.101 yamaguch int slot, r; 2004 1.1 hannken r = virtio_enqueue_prep(vsc, vq, &slot); 
2005 1.102 yamaguch if (r == EAGAIN) { 2006 1.102 yamaguch txc->txc_no_free_slots = true; 2007 1.1 hannken break; 2008 1.102 yamaguch } 2009 1.97 yamaguch if (__predict_false(r != 0)) 2010 1.101 yamaguch panic("enqueue_prep for tx buffers"); 2011 1.101 yamaguch 2012 1.101 yamaguch if (is_transmit) 2013 1.101 yamaguch m = pcq_get(txc->txc_intrq); 2014 1.101 yamaguch else 2015 1.101 yamaguch IFQ_DEQUEUE(&ifp->if_snd, m); 2016 1.89 yamaguch 2017 1.89 yamaguch if (m == NULL) { 2018 1.91 yamaguch virtio_enqueue_abort(vsc, vq, slot); 2019 1.91 yamaguch break; 2020 1.91 yamaguch } 2021 1.89 yamaguch 2022 1.101 yamaguch map = &netq->netq_maps[slot]; 2023 1.101 yamaguch KASSERT(map->vnm_mbuf == NULL); 2024 1.89 yamaguch 2025 1.101 yamaguch r = vioif_net_load_mbuf(vsc, map, m, BUS_DMA_WRITE); 2026 1.91 yamaguch if (r != 0) { 2027 1.101 yamaguch /* maybe just too fragmented */ 2028 1.101 yamaguch struct mbuf *newm; 2029 1.101 yamaguch 2030 1.101 yamaguch newm = m_defrag(m, M_NOWAIT); 2031 1.101 yamaguch if (newm != NULL) { 2032 1.101 yamaguch m = newm; 2033 1.101 yamaguch r = vioif_net_load_mbuf(vsc, map, m, 2034 1.101 yamaguch BUS_DMA_WRITE); 2035 1.101 yamaguch } else { 2036 1.101 yamaguch txc->txc_defrag_failed.ev_count++; 2037 1.101 yamaguch r = -1; 2038 1.101 yamaguch } 2039 1.101 yamaguch 2040 1.101 yamaguch if (r != 0) { 2041 1.101 yamaguch netq->netq_mbuf_load_failed.ev_count++; 2042 1.101 yamaguch m_freem(m); 2043 1.101 yamaguch if_statinc(ifp, if_oerrors); 2044 1.101 yamaguch virtio_enqueue_abort(vsc, vq, slot); 2045 1.101 yamaguch continue; 2046 1.101 yamaguch } 2047 1.1 hannken } 2048 1.89 yamaguch 2049 1.101 yamaguch memset(map->vnm_hdr, 0, sc->sc_hdr_size); 2050 1.101 yamaguch 2051 1.101 yamaguch r = vioif_net_enqueue_tx(vsc, vq, slot, map); 2052 1.1 hannken if (r != 0) { 2053 1.101 yamaguch netq->netq_enqueue_failed.ev_count++; 2054 1.99 yamaguch vioif_net_unload_mbuf(vsc, map); 2055 1.89 yamaguch m_freem(m); 2056 1.101 yamaguch /* slot already freed by 
vioif_net_enqueue_tx */ 2057 1.101 yamaguch 2058 1.101 yamaguch if_statinc(ifp, if_oerrors); 2059 1.101 yamaguch continue; 2060 1.1 hannken } 2061 1.95 yamaguch 2062 1.101 yamaguch queued++; 2063 1.101 yamaguch bpf_mtap(ifp, m, BPF_D_OUT); 2064 1.1 hannken } 2065 1.99 yamaguch 2066 1.101 yamaguch if (queued > 0) { 2067 1.99 yamaguch vioif_notify(vsc, vq); 2068 1.101 yamaguch ifp->if_timer = 5; 2069 1.93 yamaguch } 2070 1.7 ozaki } 2071 1.7 ozaki 2072 1.101 yamaguch /* dequeue sent mbufs */ 2073 1.55 yamaguch static bool 2074 1.101 yamaguch vioif_tx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc, 2075 1.102 yamaguch struct vioif_netqueue *netq, u_int limit, size_t *ndeqp) 2076 1.7 ozaki { 2077 1.98 yamaguch struct virtqueue *vq = netq->netq_vq; 2078 1.1 hannken struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2079 1.95 yamaguch struct vioif_net_map *map; 2080 1.1 hannken struct mbuf *m; 2081 1.1 hannken int slot, len; 2082 1.102 yamaguch bool more; 2083 1.102 yamaguch size_t ndeq; 2084 1.1 hannken 2085 1.98 yamaguch KASSERT(mutex_owned(&netq->netq_lock)); 2086 1.7 ozaki 2087 1.102 yamaguch more = false; 2088 1.102 yamaguch ndeq = 0; 2089 1.102 yamaguch 2090 1.54 yamaguch if (virtio_vq_is_enqueued(vsc, vq) == false) 2091 1.102 yamaguch goto done; 2092 1.55 yamaguch 2093 1.102 yamaguch for (;;ndeq++) { 2094 1.101 yamaguch if (limit-- == 0) { 2095 1.55 yamaguch more = true; 2096 1.55 yamaguch break; 2097 1.55 yamaguch } 2098 1.55 yamaguch 2099 1.55 yamaguch if (virtio_dequeue(vsc, vq, &slot, &len) != 0) 2100 1.55 yamaguch break; 2101 1.55 yamaguch 2102 1.98 yamaguch map = &netq->netq_maps[slot]; 2103 1.95 yamaguch KASSERT(map->vnm_mbuf != NULL); 2104 1.99 yamaguch m = vioif_net_dequeue_commit(vsc, vq, slot, 2105 1.101 yamaguch map, BUS_DMASYNC_POSTWRITE); 2106 1.99 yamaguch KASSERT(m != NULL); 2107 1.95 yamaguch 2108 1.101 yamaguch if_statinc(ifp, if_opackets); 2109 1.101 yamaguch m_freem(m); 2110 1.70 skrll } 2111 1.87 yamaguch 2112 1.102 yamaguch done: 2113 
1.102 yamaguch if (ndeqp != NULL) 2114 1.102 yamaguch *ndeqp = ndeq; 2115 1.101 yamaguch return more; 2116 1.66 reinoud } 2117 1.66 reinoud 2118 1.101 yamaguch static void 2119 1.101 yamaguch vioif_tx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc, 2120 1.101 yamaguch struct vioif_netqueue *netq) 2121 1.1 hannken { 2122 1.102 yamaguch struct vioif_tx_context *txc; 2123 1.101 yamaguch struct vioif_net_map *map; 2124 1.101 yamaguch struct mbuf *m; 2125 1.101 yamaguch unsigned int i, vq_num; 2126 1.101 yamaguch bool more; 2127 1.55 yamaguch 2128 1.98 yamaguch mutex_enter(&netq->netq_lock); 2129 1.7 ozaki 2130 1.102 yamaguch txc = netq->netq_ctx; 2131 1.101 yamaguch vq_num = netq->netq_vq->vq_num; 2132 1.102 yamaguch 2133 1.101 yamaguch for (;;) { 2134 1.102 yamaguch more = vioif_tx_deq_locked(sc, vsc, netq, vq_num, NULL); 2135 1.101 yamaguch if (more == false) 2136 1.101 yamaguch break; 2137 1.101 yamaguch } 2138 1.55 yamaguch 2139 1.101 yamaguch for (i = 0; i < vq_num; i++) { 2140 1.101 yamaguch map = &netq->netq_maps[i]; 2141 1.55 yamaguch 2142 1.101 yamaguch m = map->vnm_mbuf; 2143 1.101 yamaguch if (m == NULL) 2144 1.101 yamaguch continue; 2145 1.55 yamaguch 2146 1.101 yamaguch vioif_net_unload_mbuf(vsc, map); 2147 1.101 yamaguch m_freem(m); 2148 1.101 yamaguch } 2149 1.102 yamaguch 2150 1.102 yamaguch txc->txc_no_free_slots = false; 2151 1.102 yamaguch 2152 1.98 yamaguch mutex_exit(&netq->netq_lock); 2153 1.55 yamaguch } 2154 1.55 yamaguch 2155 1.55 yamaguch static void 2156 1.101 yamaguch vioif_start_locked(struct ifnet *ifp, struct vioif_netqueue *netq) 2157 1.55 yamaguch { 2158 1.55 yamaguch 2159 1.101 yamaguch /* 2160 1.101 yamaguch * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c. 
2161 1.101 yamaguch */ 2162 1.101 yamaguch vioif_send_common_locked(ifp, netq, false); 2163 1.101 yamaguch 2164 1.101 yamaguch } 2165 1.85 yamaguch 2166 1.101 yamaguch static void 2167 1.101 yamaguch vioif_transmit_locked(struct ifnet *ifp, struct vioif_netqueue *netq) 2168 1.101 yamaguch { 2169 1.55 yamaguch 2170 1.101 yamaguch vioif_send_common_locked(ifp, netq, true); 2171 1.101 yamaguch } 2172 1.1 hannken 2173 1.101 yamaguch static void 2174 1.101 yamaguch vioif_deferred_transmit(void *arg) 2175 1.101 yamaguch { 2176 1.101 yamaguch struct vioif_netqueue *netq = arg; 2177 1.101 yamaguch struct virtio_softc *vsc = netq->netq_vq->vq_owner; 2178 1.101 yamaguch struct vioif_softc *sc = device_private(virtio_child(vsc)); 2179 1.101 yamaguch struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2180 1.1 hannken 2181 1.101 yamaguch mutex_enter(&netq->netq_lock); 2182 1.101 yamaguch vioif_send_common_locked(ifp, netq, true); 2183 1.98 yamaguch mutex_exit(&netq->netq_lock); 2184 1.1 hannken } 2185 1.1 hannken 2186 1.66 reinoud static void 2187 1.98 yamaguch vioif_tx_handle_locked(struct vioif_netqueue *netq, u_int limit) 2188 1.66 reinoud { 2189 1.98 yamaguch struct virtqueue *vq = netq->netq_vq; 2190 1.98 yamaguch struct vioif_tx_context *txc = netq->netq_ctx; 2191 1.66 reinoud struct virtio_softc *vsc = vq->vq_owner; 2192 1.66 reinoud struct vioif_softc *sc = device_private(virtio_child(vsc)); 2193 1.66 reinoud struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2194 1.66 reinoud bool more; 2195 1.87 yamaguch int enqueued; 2196 1.102 yamaguch size_t ndeq; 2197 1.66 reinoud 2198 1.98 yamaguch KASSERT(mutex_owned(&netq->netq_lock)); 2199 1.98 yamaguch KASSERT(!netq->netq_stopping); 2200 1.66 reinoud 2201 1.102 yamaguch more = vioif_tx_deq_locked(sc, vsc, netq, limit, &ndeq); 2202 1.102 yamaguch if (txc->txc_no_free_slots && ndeq > 0) { 2203 1.102 yamaguch txc->txc_no_free_slots = false; 2204 1.102 yamaguch softint_schedule(txc->txc_deferred_transmit); 2205 1.102 yamaguch } 2206 1.102 
yamaguch 2207 1.66 reinoud if (more) { 2208 1.98 yamaguch vioif_net_sched_handle(sc, netq); 2209 1.66 reinoud return; 2210 1.66 reinoud } 2211 1.66 reinoud 2212 1.87 yamaguch enqueued = (virtio_features(vsc) & VIRTIO_F_RING_EVENT_IDX) ? 2213 1.87 yamaguch virtio_postpone_intr_smart(vsc, vq): 2214 1.87 yamaguch virtio_start_vq_intr(vsc, vq); 2215 1.87 yamaguch if (enqueued != 0) { 2216 1.87 yamaguch virtio_stop_vq_intr(vsc, vq); 2217 1.98 yamaguch vioif_net_sched_handle(sc, netq); 2218 1.66 reinoud return; 2219 1.66 reinoud } 2220 1.66 reinoud 2221 1.98 yamaguch netq->netq_running_handle = false; 2222 1.84 yamaguch 2223 1.66 reinoud /* for ALTQ */ 2224 1.102 yamaguch if (netq == &sc->sc_netqs[VIOIF_NETQ_TXQID(0)]) 2225 1.66 reinoud if_schedule_deferred_start(ifp); 2226 1.102 yamaguch 2227 1.98 yamaguch softint_schedule(txc->txc_deferred_transmit); 2228 1.66 reinoud } 2229 1.66 reinoud 2230 1.1 hannken static int 2231 1.54 yamaguch vioif_tx_intr(void *arg) 2232 1.1 hannken { 2233 1.98 yamaguch struct vioif_netqueue *netq = arg; 2234 1.98 yamaguch struct virtqueue *vq = netq->netq_vq; 2235 1.1 hannken struct virtio_softc *vsc = vq->vq_owner; 2236 1.32 jdolecek struct vioif_softc *sc = device_private(virtio_child(vsc)); 2237 1.55 yamaguch u_int limit; 2238 1.55 yamaguch 2239 1.98 yamaguch mutex_enter(&netq->netq_lock); 2240 1.7 ozaki 2241 1.84 yamaguch /* tx handler is already running in softint/workqueue */ 2242 1.98 yamaguch if (netq->netq_running_handle) 2243 1.84 yamaguch goto done; 2244 1.7 ozaki 2245 1.98 yamaguch if (netq->netq_stopping) 2246 1.84 yamaguch goto done; 2247 1.55 yamaguch 2248 1.98 yamaguch netq->netq_running_handle = true; 2249 1.55 yamaguch 2250 1.84 yamaguch virtio_stop_vq_intr(vsc, vq); 2251 1.98 yamaguch netq->netq_workqueue = sc->sc_txrx_workqueue_sysctl; 2252 1.98 yamaguch limit = sc->sc_tx_intr_process_limit; 2253 1.98 yamaguch vioif_tx_handle_locked(netq, limit); 2254 1.7 ozaki 2255 1.84 yamaguch done: 2256 1.98 yamaguch 
mutex_exit(&netq->netq_lock); 2257 1.55 yamaguch return 1; 2258 1.55 yamaguch } 2259 1.55 yamaguch 2260 1.55 yamaguch static void 2261 1.98 yamaguch vioif_tx_handle(void *xnetq) 2262 1.55 yamaguch { 2263 1.98 yamaguch struct vioif_netqueue *netq = xnetq; 2264 1.98 yamaguch struct virtqueue *vq = netq->netq_vq; 2265 1.55 yamaguch struct virtio_softc *vsc = vq->vq_owner; 2266 1.55 yamaguch struct vioif_softc *sc = device_private(virtio_child(vsc)); 2267 1.55 yamaguch u_int limit; 2268 1.55 yamaguch 2269 1.98 yamaguch mutex_enter(&netq->netq_lock); 2270 1.85 yamaguch 2271 1.98 yamaguch KASSERT(netq->netq_running_handle); 2272 1.85 yamaguch 2273 1.98 yamaguch if (netq->netq_stopping) { 2274 1.98 yamaguch netq->netq_running_handle = false; 2275 1.85 yamaguch goto done; 2276 1.85 yamaguch } 2277 1.85 yamaguch 2278 1.55 yamaguch limit = sc->sc_tx_process_limit; 2279 1.98 yamaguch vioif_tx_handle_locked(netq, limit); 2280 1.55 yamaguch 2281 1.85 yamaguch done: 2282 1.98 yamaguch mutex_exit(&netq->netq_lock); 2283 1.7 ozaki } 2284 1.7 ozaki 2285 1.1 hannken /* 2286 1.1 hannken * Control vq 2287 1.1 hannken */ 2288 1.1 hannken /* issue a VIRTIO_NET_CTRL_RX class command and wait for completion */ 2289 1.44 yamaguch static void 2290 1.44 yamaguch vioif_ctrl_acquire(struct vioif_softc *sc) 2291 1.1 hannken { 2292 1.43 yamaguch struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; 2293 1.1 hannken 2294 1.43 yamaguch mutex_enter(&ctrlq->ctrlq_wait_lock); 2295 1.43 yamaguch while (ctrlq->ctrlq_inuse != FREE) 2296 1.43 yamaguch cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock); 2297 1.43 yamaguch ctrlq->ctrlq_inuse = INUSE; 2298 1.44 yamaguch ctrlq->ctrlq_owner = curlwp; 2299 1.44 yamaguch mutex_exit(&ctrlq->ctrlq_wait_lock); 2300 1.44 yamaguch } 2301 1.44 yamaguch 2302 1.44 yamaguch static void 2303 1.44 yamaguch vioif_ctrl_release(struct vioif_softc *sc) 2304 1.44 yamaguch { 2305 1.44 yamaguch struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq; 2306 1.44 yamaguch 2307 1.44 yamaguch 
KASSERT(ctrlq->ctrlq_inuse != FREE); 2308 1.44 yamaguch KASSERT(ctrlq->ctrlq_owner == curlwp); 2309 1.44 yamaguch 2310 1.44 yamaguch mutex_enter(&ctrlq->ctrlq_wait_lock); 2311 1.44 yamaguch ctrlq->ctrlq_inuse = FREE; 2312 1.46 yamaguch ctrlq->ctrlq_owner = NULL; 2313 1.44 yamaguch cv_signal(&ctrlq->ctrlq_wait); 2314 1.43 yamaguch mutex_exit(&ctrlq->ctrlq_wait_lock); 2315 1.44 yamaguch } 2316 1.44 yamaguch 2317 1.44 yamaguch static int 2318 1.44 yamaguch vioif_ctrl_load_cmdspec(struct vioif_softc *sc, 2319 1.44 yamaguch struct vioif_ctrl_cmdspec *specs, int nspecs) 2320 1.44 yamaguch { 2321 1.44 yamaguch struct virtio_softc *vsc = sc->sc_virtio; 2322 1.44 yamaguch int i, r, loaded; 2323 1.44 yamaguch 2324 1.44 yamaguch loaded = 0; 2325 1.44 yamaguch for (i = 0; i < nspecs; i++) { 2326 1.44 yamaguch r = bus_dmamap_load(virtio_dmat(vsc), 2327 1.44 yamaguch specs[i].dmamap, specs[i].buf, specs[i].bufsize, 2328 1.48 msaitoh NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2329 1.44 yamaguch if (r) { 2330 1.63 yamaguch sc->sc_ctrlq.ctrlq_cmd_load_failed.ev_count++; 2331 1.44 yamaguch goto err; 2332 1.44 yamaguch } 2333 1.44 yamaguch loaded++; 2334 1.44 yamaguch 2335 1.44 yamaguch } 2336 1.44 yamaguch 2337 1.44 yamaguch return r; 2338 1.44 yamaguch 2339 1.44 yamaguch err: 2340 1.44 yamaguch for (i = 0; i < loaded; i++) { 2341 1.44 yamaguch bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap); 2342 1.44 yamaguch } 2343 1.44 yamaguch 2344 1.44 yamaguch return r; 2345 1.44 yamaguch } 2346 1.44 yamaguch 2347 1.44 yamaguch static void 2348 1.44 yamaguch vioif_ctrl_unload_cmdspec(struct vioif_softc *sc, 2349 1.44 yamaguch struct vioif_ctrl_cmdspec *specs, int nspecs) 2350 1.44 yamaguch { 2351 1.44 yamaguch struct virtio_softc *vsc = sc->sc_virtio; 2352 1.44 yamaguch int i; 2353 1.44 yamaguch 2354 1.44 yamaguch for (i = 0; i < nspecs; i++) { 2355 1.44 yamaguch bus_dmamap_unload(virtio_dmat(vsc), specs[i].dmamap); 2356 1.44 yamaguch } 2357 1.44 yamaguch } 2358 1.44 yamaguch 2359 1.44 
/*
 * Synchronously issue a command on the control virtqueue and wait for
 * the device to complete it.  The command/status buffers live in the
 * ctrlq; the payload buffers come in via specs[] (already DMA-loaded
 * by the caller).  The control queue must be held via
 * vioif_ctrl_acquire().  Returns 0 on device ACK, EIO otherwise.
 */
static int
vioif_ctrl_send_command(struct vioif_softc *sc, uint8_t class, uint8_t cmd,
    struct vioif_ctrl_cmdspec *specs, int nspecs)
{
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	struct virtqueue *vq = ctrlq->ctrlq_vq;
	struct virtio_softc *vsc = sc->sc_virtio;
	int i, r, slot;

	ctrlq->ctrlq_cmd->class = class;
	ctrlq->ctrlq_cmd->command = cmd;

	/* make the command, payloads and status visible to the device */
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap,
	    0, sizeof(struct virtio_net_ctrl_cmd), BUS_DMASYNC_PREWRITE);
	for (i = 0; i < nspecs; i++) {
		bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap,
		    0, specs[i].bufsize, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap,
	    0, sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_PREREAD);

	/* we need to explicitly (re)start vq intr when using RING EVENT IDX */
	if (virtio_features(vsc) & VIRTIO_F_RING_EVENT_IDX)
		virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);

	/*
	 * One descriptor each for the command header and the status
	 * byte, plus one per payload spec; hence nspecs + 2.  The ctrl
	 * vq only ever carries one command at a time, so a busy queue
	 * here is a driver invariant violation -> panic.
	 */
	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	r = virtio_enqueue_reserve(vsc, vq, slot, nspecs + 2);
	if (r != 0)
		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_cmd_dmamap, true);
	for (i = 0; i < nspecs; i++) {
		virtio_enqueue(vsc, vq, slot, specs[i].dmamap, true);
	}
	/* status is device-writable, hence "false" (device writes) */
	virtio_enqueue(vsc, vq, slot, ctrlq->ctrlq_status_dmamap, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* wait for done */
	mutex_enter(&ctrlq->ctrlq_wait_lock);
	while (ctrlq->ctrlq_inuse != DONE)
		cv_wait(&ctrlq->ctrlq_wait, &ctrlq->ctrlq_wait_lock);
	mutex_exit(&ctrlq->ctrlq_wait_lock);
	/* already dequeued */

	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_cmd_dmamap, 0,
	    sizeof(struct virtio_net_ctrl_cmd), BUS_DMASYNC_POSTWRITE);
	for (i = 0; i < nspecs; i++) {
		bus_dmamap_sync(virtio_dmat(vsc), specs[i].dmamap, 0,
		    specs[i].bufsize, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap, 0,
	    sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_POSTREAD);

	if (ctrlq->ctrlq_status->ack == VIRTIO_NET_OK)
		r = 0;
	else {
		/*
		 * NOTE(review): this message is stale -- this path is
		 * reached for every ctrl-vq command class (MAC, MQ,
		 * TABLE_SET, ...), not just RX mode.  Consider a
		 * class/cmd-specific message.
		 */
		device_printf(sc->sc_dev, "failed setting rx mode\n");
		sc->sc_ctrlq.ctrlq_cmd_failed.ev_count++;
		r = EIO;
	}

	return r;
}

/* ctrl vq interrupt; wake up the command issuer */
static int
vioif_ctrl_intr(void *arg)
{
	struct vioif_ctrlqueue *ctrlq = arg;
	struct virtqueue *vq = ctrlq->ctrlq_vq;
	struct virtio_softc *vsc = vq->vq_owner;
	int r, slot;

	if (virtio_vq_is_enqueued(vsc, vq) == false)
		return 0;

	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r == ENOENT)
		return 0;
	virtio_dequeue_commit(vsc, vq, slot);

	/* mark the command complete and wake the sleeping issuer */
	mutex_enter(&ctrlq->ctrlq_wait_lock);
	ctrlq->ctrlq_inuse = DONE;
	cv_signal(&ctrlq->ctrlq_wait);
	mutex_exit(&ctrlq->ctrlq_wait_lock);

	return 1;
}

/*
 * Issue a VIRTIO_NET_CTRL_RX class command (promisc/allmulti on/off).
 * Returns ENOTSUP when the device lacks a control queue.
 */
static int
vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
{
	struct virtio_net_ctrl_rx *rx = sc->sc_ctrlq.ctrlq_rx;
	struct vioif_ctrl_cmdspec specs[1];
	int r;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	vioif_ctrl_acquire(sc);

	rx->onoff = onoff;
	specs[0].dmamap = sc->sc_ctrlq.ctrlq_rx_dmamap;
	specs[0].buf = rx;
	specs[0].bufsize = sizeof(*rx);

	r = vioif_ctrl_send_command(sc, VIRTIO_NET_CTRL_RX, cmd,
	    specs, __arraycount(specs));

	vioif_ctrl_release(sc);
	return r;
}

/* Enable/disable promiscuous mode via the control queue. */
static int
vioif_set_promisc(struct vioif_softc *sc, bool onoff)
{
	return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_PROMISC, onoff);
}

/* Enable/disable all-multicast reception via the control queue. */
static int
vioif_set_allmulti(struct vioif_softc *sc, bool onoff)
{
	return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);
}

/*
 * Tell the device how many virtqueue pairs to use (multiqueue).
 * Returns ENOTSUP without a control queue and EINVAL for nvq_pairs <= 1
 * (single-queue operation needs no command).
 */
static int
vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *sc, int nvq_pairs)
{
	struct virtio_net_ctrl_mq *mq = sc->sc_ctrlq.ctrlq_mq;
	struct vioif_ctrl_cmdspec specs[1];
	int r;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	if (nvq_pairs <= 1)
		return EINVAL;

	vioif_ctrl_acquire(sc);

	mq->virtqueue_pairs = virtio_rw16(sc->sc_virtio, nvq_pairs);
	specs[0].dmamap = sc->sc_ctrlq.ctrlq_mq_dmamap;
	specs[0].buf = mq;
	specs[0].bufsize = sizeof(*mq);

	r = vioif_ctrl_send_command(sc,
	    VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
	    specs, __arraycount(specs));

	vioif_ctrl_release(sc);

	return r;
}

/*
 * Push the interface's current MAC address (if_sadl) to the device.
 * No-op (returns 0) when the cached sc_mac already matches.  Uses the
 * CTRL_MAC_ADDR_SET command when the device offers it, otherwise falls
 * back to writing the config space byte by byte.
 */
static int
vioif_set_mac_addr(struct vioif_softc *sc)
{
	struct virtio_net_ctrl_mac_addr *ma =
	    sc->sc_ctrlq.ctrlq_mac_addr;
	struct vioif_ctrl_cmdspec specs[1];
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int nspecs = __arraycount(specs);
	uint64_t features;
	int r;
	size_t i;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	/* nothing to do if the address did not change */
	if (memcmp(CLLADDR(ifp->if_sadl), sc->sc_mac,
	    ETHER_ADDR_LEN) == 0) {
		return 0;
	}

	memcpy(sc->sc_mac, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);

	features = virtio_features(sc->sc_virtio);
	if (features & VIRTIO_NET_F_CTRL_MAC_ADDR) {
		vioif_ctrl_acquire(sc);

		memcpy(ma->mac, sc->sc_mac, ETHER_ADDR_LEN);
		specs[0].dmamap = sc->sc_ctrlq.ctrlq_mac_addr_dmamap;
		specs[0].buf = ma;
		specs[0].bufsize = sizeof(*ma);

		r = vioif_ctrl_send_command(sc,
		    VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_ADDR_SET,
		    specs, nspecs);

		vioif_ctrl_release(sc);
	} else {
		/* legacy fallback: write config space directly */
		for (i = 0; i < __arraycount(sc->sc_mac); i++) {
			virtio_write_device_config_1(sc->sc_virtio,
			    VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
		}
		r = 0;
	}

	return r;
}

/*
 * Send the unicast/multicast MAC tables (already filled in by the
 * caller in ctrlq_mac_tbl_{uc,mc}) to the device with
 * CTRL_MAC_TABLE_SET.
 */
static int
vioif_set_rx_filter(struct vioif_softc *sc)
{
	/* filter already set in ctrlq->ctrlq_mac_tbl */
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtio_net_ctrl_mac_tbl *mac_tbl_uc, *mac_tbl_mc;
	struct vioif_ctrl_cmdspec specs[2];
	int nspecs = __arraycount(specs);
	int r;

	mac_tbl_uc = sc->sc_ctrlq.ctrlq_mac_tbl_uc;
	mac_tbl_mc = sc->sc_ctrlq.ctrlq_mac_tbl_mc;

	if (!sc->sc_has_ctrl)
		return ENOTSUP;

	vioif_ctrl_acquire(sc);

	/* table size = fixed header + nentries MAC addresses */
	specs[0].dmamap = sc->sc_ctrlq.ctrlq_tbl_uc_dmamap;
	specs[0].buf = mac_tbl_uc;
	specs[0].bufsize = sizeof(*mac_tbl_uc)
	    + (ETHER_ADDR_LEN * virtio_rw32(vsc, mac_tbl_uc->nentries));

	specs[1].dmamap = sc->sc_ctrlq.ctrlq_tbl_mc_dmamap;
	specs[1].buf = mac_tbl_mc;
	specs[1].bufsize = sizeof(*mac_tbl_mc)
	    + (ETHER_ADDR_LEN * virtio_rw32(vsc, mac_tbl_mc->nentries));

	r = vioif_ctrl_load_cmdspec(sc, specs, nspecs);
	if (r != 0)
		goto out;

	r = vioif_ctrl_send_command(sc,
	    VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET,
	    specs, nspecs);

	vioif_ctrl_unload_cmdspec(sc, specs, nspecs);

out:
	vioif_ctrl_release(sc);

	return r;
}

/*
 * If multicast filter small enough (<=MAXENTRIES) set rx filter
 * If large multicast filter exist use ALLMULTI
 * If setting rx filter fails fall back to ALLMULTI
 */
static int
vioif_rx_filter(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
	int nentries;
	bool allmulti = 0;
	int r;

	if (!sc->sc_has_ctrl) {
		goto set_ifflags;
	}

	/* the single unicast table entry is our own address */
	memcpy(ctrlq->ctrlq_mac_tbl_uc->macs[0],
	    CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);

	nentries = 0;
	allmulti = false;

	/*
	 * Walk the multicast list under ETHER_LOCK.  Fall back to
	 * ALLMULTI when the list overflows the table or when any entry
	 * is a range (addrlo != addrhi), which the table can't express.
	 */
	ETHER_LOCK(ec);
	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
	    ETHER_NEXT_MULTI(step, enm)) {
		if (nentries >= VIRTIO_NET_CTRL_MAC_MAXENTRIES) {
			allmulti = true;
			break;
		}
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			allmulti = true;
			break;
		}

		memcpy(ctrlq->ctrlq_mac_tbl_mc->macs[nentries],
		    enm->enm_addrlo, ETHER_ADDR_LEN);
		nentries++;
	}
	ETHER_UNLOCK(ec);

	r = vioif_set_mac_addr(sc);
	if (r != 0) {
		log(LOG_WARNING, "%s: couldn't set MAC address\n",
		    ifp->if_xname);
	}

	if (!allmulti) {
		ctrlq->ctrlq_mac_tbl_uc->nentries = virtio_rw32(vsc, 1);
		ctrlq->ctrlq_mac_tbl_mc->nentries = virtio_rw32(vsc, nentries);
		r = vioif_set_rx_filter(sc);
		if (r != 0) {
			allmulti = true; /* fallback */
		}
	}

	if (allmulti) {
		/* empty tables: let the device pass all multicast */
		ctrlq->ctrlq_mac_tbl_uc->nentries = virtio_rw32(vsc, 0);
		ctrlq->ctrlq_mac_tbl_mc->nentries = virtio_rw32(vsc, 0);
		r = vioif_set_rx_filter(sc);
		if (r != 0) {
			log(LOG_DEBUG, "%s: couldn't clear RX filter\n",
			    ifp->if_xname);
			/* what to do on failure? */
		}

		ifp->if_flags |= IFF_ALLMULTI;
	}

set_ifflags:
	r = vioif_ifflags(sc);

	return r;
}

/*
 * VM configuration changes
 */
static int
vioif_config_change(struct virtio_softc *vsc)
{
	struct vioif_softc *sc = device_private(virtio_child(vsc));

	/* defer to softint context; we may be in interrupt context here */
	softint_schedule(sc->sc_cfg_softint);
	return 0;
}

/*
 * Softint handler for device config changes: refresh the link state
 * and kick TX in case the link just came up.
 */
static void
vioif_cfg_softint(void *arg)
{
	struct vioif_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	vioif_update_link_status(sc);
	vioif_start(ifp);
}

/*
 * Read the link state from device config space.  Devices without
 * VIRTIO_NET_F_STATUS are always reported as up.  Returns
 * LINK_STATE_UP or LINK_STATE_DOWN.
 */
static int
vioif_get_link_status(struct vioif_softc *sc)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	uint16_t status;

	if (virtio_features(vsc) & VIRTIO_NET_F_STATUS)
		status = virtio_read_device_config_2(vsc,
		    VIRTIO_NET_CONFIG_STATUS);
	else
		status = VIRTIO_NET_S_LINK_UP;

	if ((status & VIRTIO_NET_S_LINK_UP) != 0)
		return LINK_STATE_UP;

	return LINK_STATE_DOWN;
}

/*
 * Re-read the link state and, if it changed, propagate it to each
 * active TX queue's txc_link_active flag (under the per-queue lock)
 * and notify the network stack.
 */
static void
vioif_update_link_status(struct vioif_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct vioif_netqueue *netq;
	struct vioif_tx_context *txc;
	bool active;
	int link, i;

	mutex_enter(&sc->sc_lock);

	link = vioif_get_link_status(sc);

	if (link == sc->sc_link_state)
		goto done;

	sc->sc_link_state = link;

	active = VIOIF_IS_LINK_ACTIVE(sc);
	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
		netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];

		mutex_enter(&netq->netq_lock);
		txc = netq->netq_ctx;
		txc->txc_link_active = active;
		mutex_exit(&netq->netq_lock);
	}

	if_link_state_change(ifp, sc->sc_link_state);

done:
	mutex_exit(&sc->sc_lock);
}

/*
 * workqueue(9) trampoline: clear the "added" flag before running the
 * user function so the work can be re-queued from within it.
 */
static void
vioif_workq_work(struct work *wk, void *context)
{
	struct vioif_work *work;

	work = container_of(wk, struct vioif_work, cookie);

	atomic_store_relaxed(&work->added, 0);
	work->func(work->arg);
}

/* Create a workqueue for vioif deferred work; NULL on failure. */
static struct workqueue *
vioif_workq_create(const char *name, pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	int error;

	error = workqueue_create(&wq, name, vioif_workq_work, NULL,
	    prio, ipl, flags);

	if (error)
		return NULL;

	return wq;
}

/* Destroy a workqueue created by vioif_workq_create(). */
static void
vioif_workq_destroy(struct workqueue *wq)
{

	workqueue_destroy(wq);
}

/* Initialize a vioif_work item with its callback and argument. */
static void
vioif_work_set(struct vioif_work *work, void (*func)(void *), void *arg)
{

	memset(work, 0, sizeof(*work));
	work->func = func;
	work->arg = arg;
}

/*
 * Enqueue a work item unless it is already pending ("added" flag).
 * kpreempt is disabled around workqueue_enqueue to keep the enqueue on
 * the current CPU.
 */
static void
vioif_work_add(struct workqueue *wq, struct vioif_work *work)
{

	if (atomic_load_relaxed(&work->added) != 0)
		return;

	atomic_store_relaxed(&work->added, 1);
	kpreempt_disable();
	workqueue_enqueue(wq, &work->cookie, NULL);
	kpreempt_enable();
}

/* Wait until a previously enqueued work item has finished running. */
static void
vioif_work_wait(struct workqueue *wq, struct vioif_work *work)
{

	workqueue_wait(wq, &work->cookie);
}

MODULE(MODULE_CLASS_DRIVER, if_vioif, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

/*
 * Module control entry point: attach/detach the driver's autoconf
 * glue when built as a loadable module; no-op when built in.
 */
static int
if_vioif_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_if_vioif,
		    cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_if_vioif,
		    cfattach_ioconf_if_vioif, cfdata_ioconf_if_vioif);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}