1 1.84 riastrad /* $NetBSD: virtio.c,v 1.84 2025/09/06 02:56:18 riastradh Exp $ */ 2 1.1 hannken 3 1.1 hannken /* 4 1.43 reinoud * Copyright (c) 2020 The NetBSD Foundation, Inc. 5 1.43 reinoud * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg. 6 1.1 hannken * Copyright (c) 2010 Minoura Makoto. 7 1.1 hannken * All rights reserved. 8 1.1 hannken * 9 1.1 hannken * Redistribution and use in source and binary forms, with or without 10 1.1 hannken * modification, are permitted provided that the following conditions 11 1.1 hannken * are met: 12 1.1 hannken * 1. Redistributions of source code must retain the above copyright 13 1.1 hannken * notice, this list of conditions and the following disclaimer. 14 1.1 hannken * 2. Redistributions in binary form must reproduce the above copyright 15 1.1 hannken * notice, this list of conditions and the following disclaimer in the 16 1.1 hannken * documentation and/or other materials provided with the distribution. 17 1.1 hannken * 18 1.1 hannken * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 1.1 hannken * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 1.1 hannken * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 1.1 hannken * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 1.1 hannken * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 1.1 hannken * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 1.1 hannken * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 1.1 hannken * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 1.1 hannken * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 1.1 hannken * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 1.1 hannken */ 29 1.1 hannken 30 1.1 hannken #include <sys/cdefs.h> 31 1.84 riastrad __KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.84 2025/09/06 02:56:18 riastradh Exp $"); 32 1.1 hannken 33 1.1 hannken #include <sys/param.h> 34 1.1 hannken #include <sys/systm.h> 35 1.1 hannken #include <sys/kernel.h> 36 1.1 hannken #include <sys/atomic.h> 37 1.1 hannken #include <sys/bus.h> 38 1.1 hannken #include <sys/device.h> 39 1.1 hannken #include <sys/kmem.h> 40 1.18 pgoyette #include <sys/module.h> 41 1.84 riastrad #include <sys/paravirt_membar.h> 42 1.1 hannken 43 1.22 jdolecek #define VIRTIO_PRIVATE 44 1.22 jdolecek 45 1.29 cherry #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */ 46 1.29 cherry #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */ 47 1.1 hannken 48 1.1 hannken #define MINSEG_INDIRECT 2 /* use indirect if nsegs >= this value */ 49 1.1 hannken 50 1.73 yamaguch /* 51 1.73 yamaguch * The maximum descriptor size is 2^15. Use that value as the end of 52 1.73 yamaguch * descriptor chain terminator since it will never be a valid index 53 1.73 yamaguch * in the descriptor table. 
 */
#define VRING_DESC_CHAIN_END		32768

/*
 * Human-readable names for the virtio device-type IDs, indexed by ID.
 * Incomplete list: IDs with no name yet are NULL (see NDEVNAMES bound).
 */
static const char *virtio_device_name[] = {
	"unknown (0)",			/*  0 */
	"network",			/*  1 */
	"block",			/*  2 */
	"console",			/*  3 */
	"entropy",			/*  4 */
	"memory balloon",		/*  5 */
	"I/O memory",			/*  6 */
	"remote processor messaging",	/*  7 */
	"SCSI",				/*  8 */
	"9P transport",			/*  9 */
	NULL,				/* 10 */
	NULL,				/* 11 */
	NULL,				/* 12 */
	NULL,				/* 13 */
	NULL,				/* 14 */
	NULL,				/* 15 */
	"GPU",				/* 16 */
};
#define NDEVNAMES	__arraycount(virtio_device_name)

/* forward declaration; defined below next to virtio_init_vq() */
static void	virtio_reset_vq(struct virtio_softc *,
		    struct virtqueue *);

/*
 * Write the device status byte via the bus-specific backend
 * (e.g. virtio_pci).  Thin wrapper around sc_ops->set_status.
 */
void
virtio_set_status(struct virtio_softc *sc, int status)
{
	sc->sc_ops->set_status(sc, status);
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation can only be allowed after virtio_reset.
101 1.1 hannken */ 102 1.1 hannken void 103 1.1 hannken virtio_reset(struct virtio_softc *sc) 104 1.1 hannken { 105 1.1 hannken virtio_device_reset(sc); 106 1.1 hannken } 107 1.1 hannken 108 1.53 yamaguch int 109 1.1 hannken virtio_reinit_start(struct virtio_softc *sc) 110 1.1 hannken { 111 1.51 yamaguch int i, r; 112 1.1 hannken 113 1.1 hannken virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK); 114 1.1 hannken virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER); 115 1.1 hannken for (i = 0; i < sc->sc_nvqs; i++) { 116 1.1 hannken int n; 117 1.1 hannken struct virtqueue *vq = &sc->sc_vqs[i]; 118 1.31 jakllsch n = sc->sc_ops->read_queue_size(sc, vq->vq_index); 119 1.1 hannken if (n == 0) /* vq disappeared */ 120 1.1 hannken continue; 121 1.1 hannken if (n != vq->vq_num) { 122 1.1 hannken panic("%s: virtqueue size changed, vq index %d\n", 123 1.59 riastrad device_xname(sc->sc_dev), 124 1.59 riastrad vq->vq_index); 125 1.1 hannken } 126 1.67 yamaguch virtio_reset_vq(sc, vq); 127 1.31 jakllsch sc->sc_ops->setup_queue(sc, vq->vq_index, 128 1.43 reinoud vq->vq_dmamap->dm_segs[0].ds_addr); 129 1.11 ozaki } 130 1.51 yamaguch 131 1.51 yamaguch r = sc->sc_ops->setup_interrupts(sc, 1); 132 1.53 yamaguch if (r != 0) 133 1.53 yamaguch goto fail; 134 1.53 yamaguch 135 1.53 yamaguch return 0; 136 1.53 yamaguch 137 1.53 yamaguch fail: 138 1.53 yamaguch virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED); 139 1.53 yamaguch 140 1.53 yamaguch return 1; 141 1.1 hannken } 142 1.1 hannken 143 1.1 hannken void 144 1.1 hannken virtio_reinit_end(struct virtio_softc *sc) 145 1.1 hannken { 146 1.1 hannken virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK); 147 1.1 hannken } 148 1.1 hannken 149 1.1 hannken /* 150 1.1 hannken * Feature negotiation. 
 */
/*
 * Negotiate the feature set with the device.  Indirect descriptors are
 * requested by default unless disabled via cf_flags bit 0 on either the
 * virtio parent or the child device (XXX config-file knob).  The backend
 * records the accepted set in sc_active_features; sc_indirect caches
 * whether VIRTIO_F_RING_INDIRECT_DESC was granted.
 */
void
virtio_negotiate_features(struct virtio_softc *sc, uint64_t guest_features)
{
	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	sc->sc_ops->neg_features(sc, guest_features);
	if (sc->sc_active_features & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;
}


/*
 * Device configuration registers readers/writers
 */
/* debug tracing of config-space accesses; compiled out by default */
#if 0
#define DPRINTFR(n, fmt, val, index, num) \
	printf("\n%s (", n); \
	for (int i = 0; i < num; i++) \
		printf("%02x ", bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index+i)); \
	printf(") -> "); printf(fmt, val); printf("\n");
#define DPRINTFR2(n, fmt, val_s, val_n) \
	printf("%s ", n); \
	printf("\n  stream "); printf(fmt, val_s); printf(" norm "); printf(fmt, val_n); printf("\n");
#else
#define DPRINTFR(n, fmt, val, index, num)
#define DPRINTFR2(n, fmt, val_s, val_n)
#endif


/* Read one byte of device config space; no byte order to fix for 1 byte. */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint8_t val;

	val = bus_space_read_1(iot, ioh, index);

	DPRINTFR("read_1", "%02x", val, index, 1);
	return val;
}
reinoud uint16_t 198 1.59 riastrad virtio_read_device_config_2(struct virtio_softc *sc, int index) 199 1.59 riastrad { 200 1.45 reinoud bus_space_tag_t iot = sc->sc_devcfg_iot; 201 1.45 reinoud bus_space_handle_t ioh = sc->sc_devcfg_ioh; 202 1.43 reinoud uint16_t val; 203 1.45 reinoud 204 1.45 reinoud val = bus_space_read_2(iot, ioh, index); 205 1.45 reinoud if (BYTE_ORDER != sc->sc_bus_endian) 206 1.45 reinoud val = bswap16(val); 207 1.45 reinoud 208 1.43 reinoud DPRINTFR("read_2", "%04x", val, index, 2); 209 1.45 reinoud DPRINTFR2("read_2", "%04x", 210 1.59 riastrad bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 211 1.59 riastrad index), 212 1.59 riastrad bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index)); 213 1.43 reinoud return val; 214 1.43 reinoud } 215 1.43 reinoud 216 1.43 reinoud uint32_t 217 1.59 riastrad virtio_read_device_config_4(struct virtio_softc *sc, int index) 218 1.59 riastrad { 219 1.45 reinoud bus_space_tag_t iot = sc->sc_devcfg_iot; 220 1.45 reinoud bus_space_handle_t ioh = sc->sc_devcfg_ioh; 221 1.43 reinoud uint32_t val; 222 1.45 reinoud 223 1.45 reinoud val = bus_space_read_4(iot, ioh, index); 224 1.45 reinoud if (BYTE_ORDER != sc->sc_bus_endian) 225 1.45 reinoud val = bswap32(val); 226 1.45 reinoud 227 1.43 reinoud DPRINTFR("read_4", "%08x", val, index, 4); 228 1.45 reinoud DPRINTFR2("read_4", "%08x", 229 1.59 riastrad bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 230 1.59 riastrad index), 231 1.59 riastrad bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index)); 232 1.43 reinoud return val; 233 1.43 reinoud } 234 1.43 reinoud 235 1.46 reinoud /* 236 1.46 reinoud * The Virtio spec explicitly tells that reading and writing 8 bytes are not 237 1.46 reinoud * considered atomic and no triggers may be connected to reading or writing 238 1.47 reinoud * it. We access it using two 32 reads. See virtio spec 4.1.3.1. 
 */
/*
 * Read a 64-bit device config register as two 32-bit reads (low word at
 * index, high word at index + 4; see spec note above).  Two conversions:
 * first each 32-bit half is brought from bus order to the structure
 * (register layout) order, then the assembled 64-bit value is brought
 * from structure order to host order.
 */
uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	union {
		uint64_t u64;
		uint32_t l[2];
	} v;
	uint64_t val;

	v.l[0] = bus_space_read_4(iot, ioh, index);
	v.l[1] = bus_space_read_4(iot, ioh, index + 4);
	if (sc->sc_bus_endian != sc->sc_struct_endian) {
		v.l[0] = bswap32(v.l[0]);
		v.l[1] = bswap32(v.l[1]);
	}
	val = v.u64;

	if (BYTE_ORDER != sc->sc_struct_endian)
		val = bswap64(val);

	DPRINTFR("read_8", "%08"PRIx64, val, index, 8);
	DPRINTFR2("read_8 low ", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
		index),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	DPRINTFR2("read_8 high ", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh,
		index + 4),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4));
	return val;
}

/*
 * In the older virtio spec, device config registers are host endian. On newer
 * they are little endian. Some newer devices however explicitly specify their
 * register to always be little endian. These functions cater for these.
 */
/* Read a 16-bit always-little-endian device config register. */
uint16_t
virtio_read_device_config_le_2(struct virtio_softc *sc, int index)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint16_t val;

	val = bus_space_read_2(iot, ioh, index);
#if !defined(__aarch64__) && !defined(__arm__)
	/*
	 * For big-endian aarch64/armv7, bus endian is always LSB, but
	 * byte-order is automatically swapped by bus_space(9) (see also
	 * comments in virtio_pci.c). Therefore, no need to swap here.
	 */
	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		val = bswap16(val);
#endif

	DPRINTFR("read_le_2", "%04x", val, index, 2);
	/* NOTE(review): debug trace reads offset 0, not index — confirm. */
	DPRINTFR2("read_le_2", "%04x",
	    bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
	    bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
	return val;
}

/* Read a 32-bit always-little-endian device config register. */
uint32_t
virtio_read_device_config_le_4(struct virtio_softc *sc, int index)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint32_t val;

	val = bus_space_read_4(iot, ioh, index);
#if !defined(__aarch64__) && !defined(__arm__)
	/* See virtio_read_device_config_le_2() above. */
	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		val = bswap32(val);
#endif

	DPRINTFR("read_le_4", "%08x", val, index, 4);
	/* NOTE(review): debug trace reads offset 0, not index — confirm. */
	DPRINTFR2("read_le_4", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
	return val;
}

/* Write one byte of device config space. */
void
virtio_write_device_config_1(struct virtio_softc *sc, int index, uint8_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	bus_space_write_1(iot, ioh, index, value);
}

/* Write a 16-bit device config register in bus byte order. */
void
virtio_write_device_config_2(struct virtio_softc *sc, int index,
    uint16_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (BYTE_ORDER != sc->sc_bus_endian)
		value = bswap16(value);
	bus_space_write_2(iot, ioh, index, value);
}

/* Write a 32-bit device config register in bus byte order. */
void
virtio_write_device_config_4(struct virtio_softc *sc, int index,
    uint32_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (BYTE_ORDER != sc->sc_bus_endian)
		value = bswap32(value);
	bus_space_write_4(iot, ioh, index, value);
}

/*
 * The Virtio spec explicitly tells that reading and writing 8 bytes are not
 * considered atomic and no triggers may be connected to reading or writing
 * it. We access it using two 32 bit writes. For good measure it is stated to
 * always write lsb first just in case of a hypervisor bug. See See virtio
 * spec 4.1.3.1.
 */
/*
 * Write a 64-bit device config register as two 32-bit writes, converting
 * host -> structure order, then structure -> bus order per half; the half
 * that is the LSB in structure order is written first (see spec note).
 */
void
virtio_write_device_config_8(struct virtio_softc *sc, int index,
    uint64_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	union {
		uint64_t u64;
		uint32_t l[2];
	} v;

	if (BYTE_ORDER != sc->sc_struct_endian)
		value = bswap64(value);

	v.u64 = value;
	if (sc->sc_bus_endian != sc->sc_struct_endian) {
		v.l[0] = bswap32(v.l[0]);
		v.l[1] = bswap32(v.l[1]);
	}

	if (sc->sc_struct_endian == LITTLE_ENDIAN) {
		bus_space_write_4(iot, ioh, index, v.l[0]);
		bus_space_write_4(iot, ioh, index + 4, v.l[1]);
	} else {
		bus_space_write_4(iot, ioh, index + 4, v.l[1]);
		bus_space_write_4(iot, ioh, index, v.l[0]);
	}
}

/*
 * In the older virtio spec, device config registers are host endian. On newer
 * they are little endian. Some newer devices however explicitly specify their
 * register to always be little endian. These functions cater for these.
398 1.43 reinoud */ 399 1.1 hannken void 400 1.59 riastrad virtio_write_device_config_le_2(struct virtio_softc *sc, int index, 401 1.59 riastrad uint16_t value) 402 1.1 hannken { 403 1.45 reinoud bus_space_tag_t iot = sc->sc_devcfg_iot; 404 1.45 reinoud bus_space_handle_t ioh = sc->sc_devcfg_ioh; 405 1.45 reinoud 406 1.45 reinoud if (sc->sc_bus_endian != LITTLE_ENDIAN) 407 1.45 reinoud value = bswap16(value); 408 1.45 reinoud bus_space_write_2(iot, ioh, index, value); 409 1.1 hannken } 410 1.1 hannken 411 1.1 hannken void 412 1.59 riastrad virtio_write_device_config_le_4(struct virtio_softc *sc, int index, 413 1.59 riastrad uint32_t value) 414 1.43 reinoud { 415 1.45 reinoud bus_space_tag_t iot = sc->sc_devcfg_iot; 416 1.45 reinoud bus_space_handle_t ioh = sc->sc_devcfg_ioh; 417 1.45 reinoud 418 1.45 reinoud if (sc->sc_bus_endian != LITTLE_ENDIAN) 419 1.45 reinoud value = bswap32(value); 420 1.45 reinoud bus_space_write_4(iot, ioh, index, value); 421 1.43 reinoud } 422 1.43 reinoud 423 1.45 reinoud 424 1.43 reinoud /* 425 1.43 reinoud * data structures endian helpers 426 1.43 reinoud */ 427 1.59 riastrad uint16_t 428 1.59 riastrad virtio_rw16(struct virtio_softc *sc, uint16_t val) 429 1.1 hannken { 430 1.43 reinoud KASSERT(sc); 431 1.45 reinoud return BYTE_ORDER != sc->sc_struct_endian ? bswap16(val) : val; 432 1.1 hannken } 433 1.1 hannken 434 1.59 riastrad uint32_t 435 1.59 riastrad virtio_rw32(struct virtio_softc *sc, uint32_t val) 436 1.43 reinoud { 437 1.43 reinoud KASSERT(sc); 438 1.45 reinoud return BYTE_ORDER != sc->sc_struct_endian ? bswap32(val) : val; 439 1.43 reinoud } 440 1.43 reinoud 441 1.59 riastrad uint64_t 442 1.59 riastrad virtio_rw64(struct virtio_softc *sc, uint64_t val) 443 1.43 reinoud { 444 1.43 reinoud KASSERT(sc); 445 1.45 reinoud return BYTE_ORDER != sc->sc_struct_endian ? bswap64(val) : val; 446 1.43 reinoud } 447 1.43 reinoud 448 1.43 reinoud 449 1.1 hannken /* 450 1.1 hannken * Interrupt handler. 
451 1.1 hannken */ 452 1.8 ozaki static void 453 1.8 ozaki virtio_soft_intr(void *arg) 454 1.8 ozaki { 455 1.8 ozaki struct virtio_softc *sc = arg; 456 1.8 ozaki 457 1.8 ozaki KASSERT(sc->sc_intrhand != NULL); 458 1.8 ozaki 459 1.54 uwe (*sc->sc_intrhand)(sc); 460 1.8 ozaki } 461 1.8 ozaki 462 1.67 yamaguch /* set to vq->vq_intrhand in virtio_init_vq_vqdone() */ 463 1.67 yamaguch static int 464 1.67 yamaguch virtio_vq_done(void *xvq) 465 1.67 yamaguch { 466 1.67 yamaguch struct virtqueue *vq = xvq; 467 1.67 yamaguch 468 1.67 yamaguch return vq->vq_done(vq); 469 1.67 yamaguch } 470 1.67 yamaguch 471 1.67 yamaguch static int 472 1.67 yamaguch virtio_vq_intr(struct virtio_softc *sc) 473 1.67 yamaguch { 474 1.67 yamaguch struct virtqueue *vq; 475 1.67 yamaguch int i, r = 0; 476 1.67 yamaguch 477 1.67 yamaguch for (i = 0; i < sc->sc_nvqs; i++) { 478 1.67 yamaguch vq = &sc->sc_vqs[i]; 479 1.67 yamaguch if (virtio_vq_is_enqueued(sc, vq) == 1) { 480 1.67 yamaguch r |= (*vq->vq_intrhand)(vq->vq_intrhand_arg); 481 1.67 yamaguch } 482 1.67 yamaguch } 483 1.67 yamaguch 484 1.67 yamaguch return r; 485 1.67 yamaguch } 486 1.67 yamaguch 487 1.1 hannken /* 488 1.1 hannken * dmamap sync operations for a virtqueue. 
 */
/* Sync the descriptor table (it occupies [0, vq_availoffset)). */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{

	/* availoffset == sizeof(vring_desc) * vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

/*
 * Sync the whole avail ring: header + ring payload, plus the trailing
 * used_event word when EVENT_IDX was negotiated.
 */
static inline void
vq_sync_aring_all(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_avail, ring);
	size_t payloadlen = vq->vq_num * sizeof(uint16_t);
	size_t usedlen = 0;

	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
		usedlen = sizeof(uint16_t);
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset, hdrlen + payloadlen + usedlen, ops);
}

/* Sync only the avail ring header (flags + idx). */
static inline void
vq_sync_aring_header(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_avail, ring);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset, hdrlen, ops);
}

/* Sync only the avail ring payload (the descriptor-index slots). */
static inline void
vq_sync_aring_payload(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_avail, ring);
	size_t payloadlen = vq->vq_num * sizeof(uint16_t);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset + hdrlen, payloadlen, ops);
}

/*
 * Sync the used_event word that trails the avail ring; no-op unless
 * EVENT_IDX was negotiated.
 */
static inline void
vq_sync_aring_used(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_avail, ring);
	size_t payloadlen = vq->vq_num * sizeof(uint16_t);
	size_t usedlen = sizeof(uint16_t);

	if ((sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset + hdrlen + payloadlen, usedlen, ops);
}

/*
 * Sync the whole used ring: header + ring payload, plus the trailing
 * avail_event word when EVENT_IDX was negotiated.
 */
static inline void
vq_sync_uring_all(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_used, ring);
	size_t payloadlen = vq->vq_num * sizeof(struct vring_used_elem);
	size_t availlen = 0;

	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
		availlen = sizeof(uint16_t);
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset, hdrlen + payloadlen + availlen, ops);
}

/* Sync only the used ring header (flags + idx). */
static inline void
vq_sync_uring_header(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_used, ring);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset, hdrlen, ops);
}

/* Sync only the used ring payload (the vring_used_elem slots). */
static inline void
vq_sync_uring_payload(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_used, ring);
	size_t payloadlen = vq->vq_num * sizeof(struct vring_used_elem);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset + hdrlen, payloadlen, ops);
}

/*
 * Sync the avail_event word that trails the used ring; no-op unless
 * EVENT_IDX was negotiated.
 */
static inline void
vq_sync_uring_avail(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_used, ring);
	size_t payloadlen = vq->vq_num * sizeof(struct vring_used_elem);
	size_t availlen = sizeof(uint16_t);

	if ((sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset + hdrlen + payloadlen, availlen, ops);
}

/* Sync one slot's indirect descriptor table. */
static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset +
	    sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs, ops);
}

/*
 * Return true iff the device has consumed entries we have not yet
 * dequeued (used->idx has advanced past vq_used_idx).
 */
bool
virtio_vq_is_enqueued(struct virtio_softc *sc, struct virtqueue *vq)
{

	if (vq->vq_queued) {
		vq->vq_queued = 0;
		vq_sync_aring_all(sc, vq, BUS_DMASYNC_POSTWRITE);
	}

	vq_sync_uring_header(sc, vq, BUS_DMASYNC_POSTREAD);
	if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
		return 0;
	vq_sync_uring_payload(sc, vq, BUS_DMASYNC_POSTREAD);
	return 1;
}

/*
 * Increase the event index in order to delay interrupts.
 */
/*
 * Ask the device (via the EVENT_IDX used_event field) not to interrupt
 * until nslots more entries have been consumed.  Returns nonzero if the
 * device raced past the new event index already, i.e. the caller must
 * process the queue itself rather than wait for an interrupt.
 */
int
virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
    uint16_t nslots)
{
	uint16_t	idx, nused;

	idx = vq->vq_used_idx + nslots;

	/* set the new event index: avail_ring->used_event = idx */
	*vq->vq_used_event = virtio_rw16(sc, idx);
	vq_sync_aring_used(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	/* how many entries the device has consumed since our last dequeue */
	nused = (uint16_t)
	    (virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx);
	KASSERT(nused <= vq->vq_num);

	return nslots < nused;
}

/*
 * Postpone interrupt until 3/4 of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq)
{
	uint16_t	nslots;

	nslots = (uint16_t)
	    (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx) * 3 / 4;

	return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Postpone interrupt until all of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq)
{
	uint16_t nslots;

	/* all currently outstanding descriptors */
	nslots = (uint16_t)
	    (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx);

	return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{

	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * No way to disable the interrupt completely with
		 * RingEventIdx. Instead advance used_event by half the
		 * possible value. This won't happen soon and is far enough in
		 * the past to not trigger a spurious interrupt.
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx + 0x8000);
		vq_sync_aring_used(sc, vq, BUS_DMASYNC_PREWRITE);
	} else {
		vq->vq_avail->flags |=
		    virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
		vq_sync_aring_header(sc, vq, BUS_DMASYNC_PREWRITE);
	}
	vq->vq_queued++;
}

/*
 * Re-enable interrupts for the queue.  Returns nonzero if entries were
 * already pending (caller should process them; the interrupt for those
 * may never arrive).
 */
int
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{

	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * If event index feature is negotiated, enabling interrupts
		 * is done through setting the latest consumed index in the
		 * used_event field
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx);
		vq_sync_aring_used(sc, vq, BUS_DMASYNC_PREWRITE);
	} else {
		vq->vq_avail->flags &=
		    ~virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
		vq_sync_aring_header(sc, vq, BUS_DMASYNC_PREWRITE);
	}
	vq->vq_queued++;

	/*
	 * Ensure we announce to the host side that we are accepting
	 * interrupts _before_ we check whether any pending events had
	 * come over the queue while we weren't accepting interrupts.
	 */
	paravirt_membar_sync();

	vq_sync_uring_header(sc, vq, BUS_DMASYNC_POSTREAD);
	if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
		return 0;
	vq_sync_uring_payload(sc, vq, BUS_DMASYNC_POSTREAD);
	return 1;
}

/*
 * Initialize vq structure.
 */
/*
 * Reset virtqueue parameters
 */
static void
virtio_reset_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vring_desc *vds;
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the descriptor chain for free slot management */
	vds = vq->vq_desc;
	for (i = 0; i < vq_size - 1; i++) {
		vds[i].next = virtio_rw16(sc, i + 1);
	}
	/* terminate with an index that can never be valid (2^15) */
	vds[i].next = virtio_rw16(sc, VRING_DESC_CHAIN_END);
	vq->vq_free_idx = 0;

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs - 1; j++) {
				vd[j].next = virtio_rw16(sc, j + 1);
			}
		}
	}

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	vq_sync_uring_all(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
hannken } 769 1.48 skrll 770 1.67 yamaguch /* Initialize vq */ 771 1.67 yamaguch void 772 1.67 yamaguch virtio_init_vq_vqdone(struct virtio_softc *sc, struct virtqueue *vq, 773 1.67 yamaguch int index, int (*vq_done)(struct virtqueue *)) 774 1.67 yamaguch { 775 1.67 yamaguch 776 1.67 yamaguch virtio_init_vq(sc, vq, index, virtio_vq_done, vq); 777 1.67 yamaguch vq->vq_done = vq_done; 778 1.67 yamaguch } 779 1.67 yamaguch 780 1.67 yamaguch void 781 1.67 yamaguch virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq, int index, 782 1.70 riastrad int (*func)(void *), void *arg) 783 1.67 yamaguch { 784 1.67 yamaguch 785 1.67 yamaguch memset(vq, 0, sizeof(*vq)); 786 1.67 yamaguch 787 1.67 yamaguch vq->vq_owner = sc; 788 1.67 yamaguch vq->vq_num = sc->sc_ops->read_queue_size(sc, index); 789 1.67 yamaguch vq->vq_index = index; 790 1.70 riastrad vq->vq_intrhand = func; 791 1.67 yamaguch vq->vq_intrhand_arg = arg; 792 1.67 yamaguch } 793 1.67 yamaguch 794 1.1 hannken /* 795 1.1 hannken * Allocate/free a vq. 796 1.1 hannken */ 797 1.1 hannken int 798 1.67 yamaguch virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, 799 1.15 msaitoh int maxsegsize, int maxnsegs, const char *name) 800 1.1 hannken { 801 1.67 yamaguch bus_size_t size_desc, size_avail, size_used, size_indirect; 802 1.67 yamaguch bus_size_t allocsize = 0, size_desc_avail; 803 1.43 reinoud int rsegs, r, hdrlen; 804 1.67 yamaguch unsigned int vq_num; 805 1.61 skrll #define VIRTQUEUE_ALIGN(n) roundup(n, VIRTIO_PAGE_SIZE) 806 1.1 hannken 807 1.67 yamaguch vq_num = vq->vq_num; 808 1.1 hannken 809 1.67 yamaguch if (vq_num == 0) { 810 1.1 hannken aprint_error_dev(sc->sc_dev, 811 1.59 riastrad "virtqueue not exist, index %d for %s\n", 812 1.67 yamaguch vq->vq_index, name); 813 1.1 hannken goto err; 814 1.1 hannken } 815 1.43 reinoud 816 1.43 reinoud hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 
3 : 2; 817 1.43 reinoud 818 1.67 yamaguch size_desc = sizeof(vq->vq_desc[0]) * vq_num; 819 1.67 yamaguch size_avail = sizeof(uint16_t) * hdrlen 820 1.72 riastrad + sizeof(vq->vq_avail[0].ring[0]) * vq_num; 821 1.67 yamaguch size_used = sizeof(uint16_t) *hdrlen 822 1.72 riastrad + sizeof(vq->vq_used[0].ring[0]) * vq_num; 823 1.67 yamaguch size_indirect = (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT) ? 824 1.67 yamaguch sizeof(struct vring_desc) * maxnsegs * vq_num : 0; 825 1.67 yamaguch 826 1.67 yamaguch size_desc_avail = VIRTQUEUE_ALIGN(size_desc + size_avail); 827 1.67 yamaguch size_used = VIRTQUEUE_ALIGN(size_used); 828 1.67 yamaguch 829 1.67 yamaguch allocsize = size_desc_avail + size_used + size_indirect; 830 1.1 hannken 831 1.1 hannken /* alloc and map the memory */ 832 1.1 hannken r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0, 833 1.59 riastrad &vq->vq_segs[0], 1, &rsegs, BUS_DMA_WAITOK); 834 1.1 hannken if (r != 0) { 835 1.1 hannken aprint_error_dev(sc->sc_dev, 836 1.59 riastrad "virtqueue %d for %s allocation failed, " 837 1.67 yamaguch "error code %d\n", vq->vq_index, name, r); 838 1.1 hannken goto err; 839 1.1 hannken } 840 1.67 yamaguch 841 1.43 reinoud r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize, 842 1.59 riastrad &vq->vq_vaddr, BUS_DMA_WAITOK); 843 1.1 hannken if (r != 0) { 844 1.1 hannken aprint_error_dev(sc->sc_dev, 845 1.59 riastrad "virtqueue %d for %s map failed, " 846 1.67 yamaguch "error code %d\n", vq->vq_index, name, r); 847 1.1 hannken goto err; 848 1.1 hannken } 849 1.67 yamaguch 850 1.1 hannken r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0, 851 1.59 riastrad BUS_DMA_WAITOK, &vq->vq_dmamap); 852 1.1 hannken if (r != 0) { 853 1.1 hannken aprint_error_dev(sc->sc_dev, 854 1.59 riastrad "virtqueue %d for %s dmamap creation failed, " 855 1.67 yamaguch "error code %d\n", vq->vq_index, name, r); 856 1.1 hannken goto err; 857 1.1 hannken } 858 1.67 yamaguch 859 1.1 hannken r = 
bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap, 860 1.59 riastrad vq->vq_vaddr, allocsize, NULL, BUS_DMA_WAITOK); 861 1.1 hannken if (r != 0) { 862 1.1 hannken aprint_error_dev(sc->sc_dev, 863 1.59 riastrad "virtqueue %d for %s dmamap load failed, " 864 1.67 yamaguch "error code %d\n", vq->vq_index, name, r); 865 1.1 hannken goto err; 866 1.1 hannken } 867 1.1 hannken 868 1.1 hannken vq->vq_bytesize = allocsize; 869 1.1 hannken vq->vq_maxsegsize = maxsegsize; 870 1.1 hannken vq->vq_maxnsegs = maxnsegs; 871 1.1 hannken 872 1.67 yamaguch #define VIRTIO_PTR(base, offset) (void *)((intptr_t)(base) + (offset)) 873 1.67 yamaguch /* initialize vring pointers */ 874 1.67 yamaguch vq->vq_desc = VIRTIO_PTR(vq->vq_vaddr, 0); 875 1.67 yamaguch vq->vq_availoffset = size_desc; 876 1.67 yamaguch vq->vq_avail = VIRTIO_PTR(vq->vq_vaddr, vq->vq_availoffset); 877 1.67 yamaguch vq->vq_used_event = VIRTIO_PTR(vq->vq_avail, 878 1.67 yamaguch offsetof(struct vring_avail, ring[vq_num])); 879 1.67 yamaguch vq->vq_usedoffset = size_desc_avail; 880 1.67 yamaguch vq->vq_used = VIRTIO_PTR(vq->vq_vaddr, vq->vq_usedoffset); 881 1.67 yamaguch vq->vq_avail_event = VIRTIO_PTR(vq->vq_used, 882 1.67 yamaguch offsetof(struct vring_used, ring[vq_num])); 883 1.67 yamaguch 884 1.67 yamaguch if (size_indirect > 0) { 885 1.67 yamaguch vq->vq_indirectoffset = size_desc_avail + size_used; 886 1.67 yamaguch vq->vq_indirect = VIRTIO_PTR(vq->vq_vaddr, 887 1.67 yamaguch vq->vq_indirectoffset); 888 1.67 yamaguch } 889 1.67 yamaguch #undef VIRTIO_PTR 890 1.67 yamaguch 891 1.73 yamaguch vq->vq_descx = kmem_zalloc(sizeof(vq->vq_descx[0]) * vq_num, 892 1.59 riastrad KM_SLEEP); 893 1.67 yamaguch 894 1.73 yamaguch mutex_init(&vq->vq_freedesc_lock, MUTEX_SPIN, sc->sc_ipl); 895 1.67 yamaguch mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl); 896 1.67 yamaguch mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl); 897 1.67 yamaguch 898 1.67 yamaguch virtio_reset_vq(sc, vq); 899 1.1 hannken 900 1.1 hannken 
aprint_verbose_dev(sc->sc_dev, 901 1.71 nakayama "allocated %" PRIuBUSSIZE " byte for virtqueue %d for %s, " 902 1.71 nakayama "size %d\n", allocsize, vq->vq_index, name, vq_num); 903 1.67 yamaguch if (size_indirect > 0) 904 1.1 hannken aprint_verbose_dev(sc->sc_dev, 905 1.71 nakayama "using %" PRIuBUSSIZE " byte (%d entries) indirect " 906 1.71 nakayama "descriptors\n", size_indirect, maxnsegs * vq_num); 907 1.22 jdolecek 908 1.1 hannken return 0; 909 1.1 hannken 910 1.1 hannken err: 911 1.67 yamaguch sc->sc_ops->setup_queue(sc, vq->vq_index, 0); 912 1.1 hannken if (vq->vq_dmamap) 913 1.1 hannken bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap); 914 1.1 hannken if (vq->vq_vaddr) 915 1.1 hannken bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize); 916 1.1 hannken if (vq->vq_segs[0].ds_addr) 917 1.1 hannken bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1); 918 1.1 hannken memset(vq, 0, sizeof(*vq)); 919 1.1 hannken 920 1.1 hannken return -1; 921 1.1 hannken } 922 1.1 hannken 923 1.1 hannken int 924 1.1 hannken virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq) 925 1.1 hannken { 926 1.73 yamaguch uint16_t s; 927 1.73 yamaguch size_t i; 928 1.1 hannken 929 1.68 yamaguch if (vq->vq_vaddr == NULL) 930 1.68 yamaguch return 0; 931 1.68 yamaguch 932 1.1 hannken /* device must be already deactivated */ 933 1.1 hannken /* confirm the vq is empty */ 934 1.73 yamaguch s = vq->vq_free_idx; 935 1.73 yamaguch i = 0; 936 1.73 yamaguch while (s != virtio_rw16(sc, VRING_DESC_CHAIN_END)) { 937 1.73 yamaguch s = vq->vq_desc[s].next; 938 1.1 hannken i++; 939 1.1 hannken } 940 1.1 hannken if (i != vq->vq_num) { 941 1.1 hannken printf("%s: freeing non-empty vq, index %d\n", 942 1.59 riastrad device_xname(sc->sc_dev), vq->vq_index); 943 1.1 hannken return EBUSY; 944 1.1 hannken } 945 1.1 hannken 946 1.1 hannken /* tell device that there's no virtqueue any longer */ 947 1.31 jakllsch sc->sc_ops->setup_queue(sc, vq->vq_index, 0); 948 1.1 hannken 949 1.57 riastrad 
vq_sync_aring_all(sc, vq, BUS_DMASYNC_POSTWRITE); 950 1.57 riastrad 951 1.73 yamaguch kmem_free(vq->vq_descx, sizeof(vq->vq_descx[0]) * vq->vq_num); 952 1.1 hannken bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap); 953 1.1 hannken bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap); 954 1.1 hannken bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize); 955 1.1 hannken bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1); 956 1.73 yamaguch mutex_destroy(&vq->vq_freedesc_lock); 957 1.1 hannken mutex_destroy(&vq->vq_uring_lock); 958 1.1 hannken mutex_destroy(&vq->vq_aring_lock); 959 1.1 hannken memset(vq, 0, sizeof(*vq)); 960 1.1 hannken 961 1.1 hannken return 0; 962 1.1 hannken } 963 1.1 hannken 964 1.1 hannken /* 965 1.1 hannken * Free descriptor management. 966 1.1 hannken */ 967 1.73 yamaguch static int 968 1.73 yamaguch vq_alloc_slot_locked(struct virtio_softc *sc, struct virtqueue *vq, 969 1.73 yamaguch size_t nslots) 970 1.1 hannken { 971 1.73 yamaguch struct vring_desc *vd; 972 1.77 yamaguch uint16_t head, tail; 973 1.73 yamaguch size_t i; 974 1.73 yamaguch 975 1.73 yamaguch KASSERT(mutex_owned(&vq->vq_freedesc_lock)); 976 1.1 hannken 977 1.77 yamaguch head = tail = virtio_rw16(sc, vq->vq_free_idx); 978 1.73 yamaguch for (i = 0; i < nslots - 1; i++) { 979 1.73 yamaguch if (tail == VRING_DESC_CHAIN_END) 980 1.73 yamaguch return VRING_DESC_CHAIN_END; 981 1.73 yamaguch 982 1.73 yamaguch vd = &vq->vq_desc[tail]; 983 1.73 yamaguch vd->flags = virtio_rw16(sc, VRING_DESC_F_NEXT); 984 1.73 yamaguch tail = virtio_rw16(sc, vd->next); 985 1.1 hannken } 986 1.1 hannken 987 1.73 yamaguch if (tail == VRING_DESC_CHAIN_END) 988 1.73 yamaguch return VRING_DESC_CHAIN_END; 989 1.73 yamaguch 990 1.73 yamaguch vd = &vq->vq_desc[tail]; 991 1.73 yamaguch vd->flags = virtio_rw16(sc, 0); 992 1.73 yamaguch vq->vq_free_idx = vd->next; 993 1.73 yamaguch 994 1.77 yamaguch return head; 995 1.73 yamaguch } 996 1.73 yamaguch static uint16_t 997 1.73 yamaguch vq_alloc_slot(struct virtio_softc *sc, 
struct virtqueue *vq, size_t nslots) 998 1.73 yamaguch { 999 1.73 yamaguch uint16_t rv; 1000 1.73 yamaguch 1001 1.73 yamaguch mutex_enter(&vq->vq_freedesc_lock); 1002 1.73 yamaguch rv = vq_alloc_slot_locked(sc, vq, nslots); 1003 1.73 yamaguch mutex_exit(&vq->vq_freedesc_lock); 1004 1.73 yamaguch 1005 1.73 yamaguch return rv; 1006 1.1 hannken } 1007 1.1 hannken 1008 1.1 hannken static void 1009 1.73 yamaguch vq_free_slot(struct virtio_softc *sc, struct virtqueue *vq, uint16_t slot) 1010 1.1 hannken { 1011 1.73 yamaguch struct vring_desc *vd; 1012 1.73 yamaguch uint16_t s; 1013 1.1 hannken 1014 1.73 yamaguch mutex_enter(&vq->vq_freedesc_lock); 1015 1.73 yamaguch vd = &vq->vq_desc[slot]; 1016 1.73 yamaguch while ((vd->flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) != 0) { 1017 1.73 yamaguch s = virtio_rw16(sc, vd->next); 1018 1.73 yamaguch vd = &vq->vq_desc[s]; 1019 1.73 yamaguch } 1020 1.73 yamaguch vd->next = vq->vq_free_idx; 1021 1.73 yamaguch vq->vq_free_idx = virtio_rw16(sc, slot); 1022 1.73 yamaguch mutex_exit(&vq->vq_freedesc_lock); 1023 1.1 hannken } 1024 1.1 hannken 1025 1.1 hannken /* 1026 1.1 hannken * Enqueue several dmamaps as a single request. 
1027 1.1 hannken */ 1028 1.1 hannken /* 1029 1.1 hannken * Typical usage: 1030 1.1 hannken * <queue size> number of followings are stored in arrays 1031 1.1 hannken * - command blocks (in dmamem) should be pre-allocated and mapped 1032 1.1 hannken * - dmamaps for command blocks should be pre-allocated and loaded 1033 1.1 hannken * - dmamaps for payload should be pre-allocated 1034 1.1 hannken * r = virtio_enqueue_prep(sc, vq, &slot); // allocate a slot 1035 1.1 hannken * if (r) // currently 0 or EAGAIN 1036 1.59 riastrad * return r; 1037 1.1 hannken * r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..); 1038 1.1 hannken * if (r) { 1039 1.59 riastrad * virtio_enqueue_abort(sc, vq, slot); 1040 1.59 riastrad * return r; 1041 1.1 hannken * } 1042 1.48 skrll * r = virtio_enqueue_reserve(sc, vq, slot, 1043 1.59 riastrad * dmamap_payload[slot]->dm_nsegs + 1); 1044 1.1 hannken * // ^ +1 for command 1045 1.1 hannken * if (r) { // currently 0 or EAGAIN 1046 1.59 riastrad * bus_dmamap_unload(dmat, dmamap_payload[slot]); 1047 1.59 riastrad * return r; // do not call abort() 1048 1.1 hannken * } 1049 1.1 hannken * <setup and prepare commands> 1050 1.1 hannken * bus_dmamap_sync(dmat, dmamap_cmd[slot],... 
BUS_DMASYNC_PREWRITE); 1051 1.1 hannken * bus_dmamap_sync(dmat, dmamap_payload[slot],...); 1052 1.1 hannken * virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false); 1053 1.1 hannken * virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite); 1054 1.1 hannken * virtio_enqueue_commit(sc, vq, slot, true); 1055 1.1 hannken */ 1056 1.1 hannken 1057 1.1 hannken /* 1058 1.1 hannken * enqueue_prep: allocate a slot number 1059 1.1 hannken */ 1060 1.1 hannken int 1061 1.1 hannken virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp) 1062 1.1 hannken { 1063 1.73 yamaguch uint16_t slot; 1064 1.1 hannken 1065 1.82 riastrad KASSERT(sc->sc_child_state == VIRTIO_CHILD_ATTACH_FINISHED); 1066 1.1 hannken KASSERT(slotp != NULL); 1067 1.1 hannken 1068 1.73 yamaguch slot = vq_alloc_slot(sc, vq, 1); 1069 1.73 yamaguch if (slot == VRING_DESC_CHAIN_END) 1070 1.1 hannken return EAGAIN; 1071 1.73 yamaguch 1072 1.73 yamaguch *slotp = slot; 1073 1.1 hannken 1074 1.1 hannken return 0; 1075 1.1 hannken } 1076 1.1 hannken 1077 1.1 hannken /* 1078 1.1 hannken * enqueue_reserve: allocate remaining slots and build the descriptor chain. 
/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 * Decides between an indirect descriptor table (one ring slot pointing at
 * a private table of nsegs descriptors) and a chain of direct ring slots.
 * Returns 0 or EAGAIN; on EAGAIN the original slot is released.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	struct vring_desc *vd;
	struct vring_desc_extra *vdx;
	int i;

	KASSERT(1 <= nsegs);
	KASSERT(nsegs <= vq->vq_num);

	vdx = &vq->vq_descx[slot];
	vd = &vq->vq_desc[slot];

	/* slot must be the tail of its (single-entry) chain */
	KASSERT((vd->flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) == 0);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		vdx->use_indirect = true;
	else
		vdx->use_indirect = false;

	if (vdx->use_indirect) {
		uint64_t addr;

		/* bus address of this slot's private indirect table */
		addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * slot;

		vd->addr = virtio_rw64(sc, addr);
		vd->len = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs);
		vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT);

		/* subsequent segments are filled into the indirect table */
		vd = &vq->vq_indirect[vq->vq_maxnsegs * slot];
		vdx->desc_base = vd;
		vdx->desc_free_idx = 0;

		for (i = 0; i < nsegs - 1; i++) {
			vd[i].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
		}
		vd[i].flags = virtio_rw16(sc, 0);
	} else {
		if (nsegs > 1) {
			uint16_t s;

			/* chain nsegs - 1 additional ring descriptors */
			s = vq_alloc_slot(sc, vq, nsegs - 1);
			if (s == VRING_DESC_CHAIN_END) {
				vq_free_slot(sc, vq, slot);
				return EAGAIN;
			}
			vd->next = virtio_rw16(sc, s);
			vd->flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
		}

		vdx->desc_base = &vq->vq_desc[0];
		vdx->desc_free_idx = slot;
	}

	return 0;
}

/*
 * enqueue: enqueue a single dmamap.  Each DMA segment consumes one
 * descriptor of the chain reserved by virtio_enqueue_reserve();
 * desc_free_idx tracks the next unfilled descriptor.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vring_desc *vds;
	struct vring_desc_extra *vdx;
	uint16_t s;
	int i;

	KASSERT(dmamap->dm_nsegs > 0);

	vdx = &vq->vq_descx[slot];
	vds = vdx->desc_base;
	s = vdx->desc_free_idx;

	KASSERT(vds != NULL);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		KASSERT(s != VRING_DESC_CHAIN_END);

		vds[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr);
		vds[s].len = virtio_rw32(sc, dmamap->dm_segs[i].ds_len);
		/* device-writable buffer unless this is host-read data */
		if (!write)
			vds[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);

		if ((vds[s].flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) == 0) {
			s = VRING_DESC_CHAIN_END;
		} else {
			s = virtio_rw16(sc, vds[s].next);
		}
	}

	vdx->desc_free_idx = s;

	return 0;
}

/*
 * enqueue_p: enqueue a sub-range [start, start + len) of a
 * single-segment dmamap into the next reserved descriptor.
 */
int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vring_desc_extra *vdx;
	struct vring_desc *vds;
	uint16_t s;

	vdx = &vq->vq_descx[slot];
	vds = vdx->desc_base;
	s = vdx->desc_free_idx;

	KASSERT(s != VRING_DESC_CHAIN_END);
	KASSERT(vds != NULL);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT(dmamap->dm_segs[0].ds_len > start);
	KASSERT(dmamap->dm_segs[0].ds_len >= start + len);

	vds[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start);
	vds[s].len = virtio_rw32(sc, len);
	if (!write)
		vds[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);

	if ((vds[s].flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) == 0) {
		s = VRING_DESC_CHAIN_END;
	} else {
		s = virtio_rw16(sc, vds[s].next);
	}

	vdx->desc_free_idx = s;

	return 0;
}
/*
 * enqueue_commit: add it to the aring.  A negative slot skips the
 * avail-ring update and only performs the (optional) host notification
 * for previously queued slots.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}

	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	if (vq->vq_descx[slot].use_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);

	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] =
	    virtio_rw16(sc, slot);

notify:
	if (notifynow) {
		uint16_t o, n, t;
		uint16_t flags;

		/* o = last avail idx published; n = new avail idx */
		o = virtio_rw16(sc, vq->vq_avail->idx) - 1;
		n = vq->vq_avail_idx;

		/*
		 * Prepare for `device->CPU' (host->guest) transfer
		 * into the buffer.  This must happen before we commit
		 * the vq->vq_avail->idx update to ensure we're not
		 * still using the buffer in case program-prior loads
		 * or stores in it get delayed past the store to
		 * vq->vq_avail->idx.
		 */
		vq_sync_uring_all(sc, vq, BUS_DMASYNC_PREREAD);

		/* ensure payload is published, then avail idx */
		vq_sync_aring_payload(sc, vq, BUS_DMASYNC_PREWRITE);
		vq->vq_avail->idx = virtio_rw16(sc, vq->vq_avail_idx);
		vq_sync_aring_header(sc, vq, BUS_DMASYNC_PREWRITE);
		vq->vq_queued++;

		/*
		 * Ensure we publish the avail idx _before_ we check
		 * whether the host needs to be notified.
		 */
		paravirt_membar_sync();

		if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
			/*
			 * Event-index suppression: kick only if the
			 * device's avail_event falls inside (o, n].
			 */
			vq_sync_uring_avail(sc, vq, BUS_DMASYNC_POSTREAD);
			t = virtio_rw16(sc, *vq->vq_avail_event) + 1;
			if ((uint16_t) (n - t) < (uint16_t) (n - o))
				sc->sc_ops->kick(sc, vq->vq_index);
		} else {
			/* legacy suppression via used-ring flag */
			vq_sync_uring_header(sc, vq, BUS_DMASYNC_POSTREAD);
			flags = virtio_rw16(sc, vq->vq_used->flags);
			if (!(flags & VRING_USED_F_NO_NOTIFY))
				sc->sc_ops->kick(sc, vq->vq_index);
		}
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}
/*
 * enqueue_abort: rollback.  Releases a slot obtained from
 * virtio_enqueue_prep() without submitting it to the device.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vring_desc_extra *vdx;

	vdx = &vq->vq_descx[slot];
	vdx->desc_free_idx = VRING_DESC_CHAIN_END;
	vdx->desc_base = NULL;

	vq_free_slot(sc, vq, slot);

	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.  Returns ENOENT when the
 * used ring holds no new entries; otherwise reports the completed
 * slot (and, optionally, the device-written length) via *slotp/*lenp.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;

	if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
		return ENOENT;
	/* claim the next used-ring entry under the uring lock */
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id);

	if (vq->vq_descx[slot].use_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = virtio_rw32(sc, vq->vq_used->ring[usedidx].len);

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 * if you forget to call this the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vring_desc_extra *vdx;

	vdx = &vq->vq_descx[slot];
	vdx->desc_base = NULL;
	vdx->desc_free_idx = VRING_DESC_CHAIN_END;

	vq_free_slot(sc, vq, slot);

	return 0;
}

/*
 * Attach a child, fill all the members.
 */
/*
 * First phase of child attachment: record the child device and ipl,
 * negotiate features, and announce them.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    uint64_t req_features, const char *feat_bits)
{
	char buf[1024];

	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_child_state == VIRTIO_NO_CHILD);

	sc->sc_child = child;
	sc->sc_ipl = ipl;

	virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features);
	aprint_normal(": features: %s\n", buf);
	aprint_naive("\n");
}

/*
 * Second phase of child attachment: register the virtqueues with the
 * device, set up (soft) interrupts, and signal DRIVER_OK.  Returns 0
 * on success; on failure interrupts are torn down and the device
 * status is set to FAILED, and 1 is returned.
 */
int
virtio_child_attach_finish(struct virtio_softc *sc,
    struct virtqueue *vqs, size_t nvqs,
    virtio_callback config_change,
    int req_flags)
{
	size_t i;
	int r;

#ifdef DIAGNOSTIC
	KASSERT(nvqs > 0);
#define VIRTIO_ASSERT_FLAGS	(VIRTIO_F_INTR_SOFTINT | VIRTIO_F_INTR_PERVQ)
	/* softint dispatch and per-vq interrupts are mutually exclusive */
	KASSERT((req_flags & VIRTIO_ASSERT_FLAGS) != VIRTIO_ASSERT_FLAGS);
#undef VIRTIO_ASSERT_FLAGS

	for (i = 0; i < nvqs; i++){
		KASSERT(vqs[i].vq_index == i);
		KASSERT(vqs[i].vq_intrhand != NULL);
		KASSERT(vqs[i].vq_done == NULL ||
		    vqs[i].vq_intrhand == virtio_vq_done);
	}
#endif


	sc->sc_vqs = vqs;
	sc->sc_nvqs = nvqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = virtio_vq_intr;
	sc->sc_flags = req_flags;

	/* set the vq address */
	for (i = 0; i < nvqs; i++) {
		sc->sc_ops->setup_queue(sc, vqs[i].vq_index,
		    vqs[i].vq_dmamap->dm_segs[0].ds_addr);
	}

	r = sc->sc_ops->alloc_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "failed to allocate interrupts\n");
		goto fail;
	}

	r = sc->sc_ops->setup_interrupts(sc, 0);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
		goto fail;
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr,
		    sc);
		if (sc->sc_soft_ih == NULL) {
			sc->sc_ops->free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			goto fail;
		}
	}

	sc->sc_child_state = VIRTIO_CHILD_ATTACH_FINISHED;
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return 0;

fail:
	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	sc->sc_ops->free_interrupts(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	return 1;
}

/*
 * Detach the child: reset the device, free interrupts, and clear the
 * child bookkeeping.  Safe to call when already detached.
 */
void
virtio_child_detach(struct virtio_softc *sc)
{

	/* already detached */
	if (sc->sc_child == NULL)
		return;


	virtio_device_reset(sc);

	sc->sc_ops->free_interrupts(sc);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	sc->sc_vqs = NULL;
	sc->sc_child = NULL;
}

/*
 * Called by the child when its attach failed: detach, mark the device
 * FAILED, and record the failure for virtio_attach_failed().
 */
void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child_state = VIRTIO_CHILD_ATTACH_FAILED;
}

/* Accessor: DMA tag to use for child-driver allocations. */
bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}
1493 1.22 jdolecek device_t 1494 1.22 jdolecek virtio_child(struct virtio_softc *sc) 1495 1.22 jdolecek { 1496 1.22 jdolecek return sc->sc_child; 1497 1.22 jdolecek } 1498 1.22 jdolecek 1499 1.22 jdolecek int 1500 1.22 jdolecek virtio_intrhand(struct virtio_softc *sc) 1501 1.22 jdolecek { 1502 1.54 uwe return (*sc->sc_intrhand)(sc); 1503 1.22 jdolecek } 1504 1.22 jdolecek 1505 1.43 reinoud uint64_t 1506 1.22 jdolecek virtio_features(struct virtio_softc *sc) 1507 1.22 jdolecek { 1508 1.43 reinoud return sc->sc_active_features; 1509 1.22 jdolecek } 1510 1.22 jdolecek 1511 1.83 martin bool 1512 1.83 martin virtio_version_1(struct virtio_softc *sc) 1513 1.83 martin { 1514 1.83 martin return sc->sc_version_1; 1515 1.83 martin } 1516 1.83 martin 1517 1.35 jakllsch int 1518 1.43 reinoud virtio_attach_failed(struct virtio_softc *sc) 1519 1.35 jakllsch { 1520 1.43 reinoud device_t self = sc->sc_dev; 1521 1.35 jakllsch 1522 1.43 reinoud /* no error if its not connected, but its failed */ 1523 1.43 reinoud if (sc->sc_childdevid == 0) 1524 1.43 reinoud return 1; 1525 1.36 jmcneill 1526 1.75 yamaguch if (sc->sc_child == NULL) { 1527 1.75 yamaguch switch (sc->sc_child_state) { 1528 1.75 yamaguch case VIRTIO_CHILD_ATTACH_FAILED: 1529 1.75 yamaguch aprint_error_dev(self, 1530 1.75 yamaguch "virtio configuration failed\n"); 1531 1.75 yamaguch break; 1532 1.75 yamaguch case VIRTIO_NO_CHILD: 1533 1.75 yamaguch aprint_error_dev(self, 1534 1.75 yamaguch "no matching child driver; not configured\n"); 1535 1.75 yamaguch break; 1536 1.75 yamaguch default: 1537 1.75 yamaguch /* sanity check */ 1538 1.75 yamaguch aprint_error_dev(self, 1539 1.75 yamaguch "virtio internal error, " 1540 1.75 yamaguch "child driver is not configured\n"); 1541 1.75 yamaguch break; 1542 1.75 yamaguch } 1543 1.74 yamaguch 1544 1.43 reinoud return 1; 1545 1.43 reinoud } 1546 1.35 jakllsch 1547 1.44 reinoud /* sanity check */ 1548 1.75 yamaguch if (sc->sc_child_state != VIRTIO_CHILD_ATTACH_FINISHED) { 1549 1.44 
reinoud aprint_error_dev(self, "virtio internal error, child driver " 1550 1.59 riastrad "signaled OK but didn't initialize interrupts\n"); 1551 1.44 reinoud return 1; 1552 1.44 reinoud } 1553 1.44 reinoud 1554 1.43 reinoud return 0; 1555 1.43 reinoud } 1556 1.43 reinoud 1557 1.43 reinoud void 1558 1.43 reinoud virtio_print_device_type(device_t self, int id, int revision) 1559 1.43 reinoud { 1560 1.58 riastrad aprint_normal_dev(self, "%s device (id %d, rev. 0x%02x)\n", 1561 1.58 riastrad (id < NDEVNAMES ? virtio_device_name[id] : "Unknown"), 1562 1.58 riastrad id, 1563 1.58 riastrad revision); 1564 1.35 jakllsch } 1565 1.35 jakllsch 1566 1.43 reinoud 1567 1.32 jakllsch MODULE(MODULE_CLASS_DRIVER, virtio, NULL); 1568 1.48 skrll 1569 1.18 pgoyette #ifdef _MODULE 1570 1.18 pgoyette #include "ioconf.c" 1571 1.18 pgoyette #endif 1572 1.48 skrll 1573 1.18 pgoyette static int 1574 1.18 pgoyette virtio_modcmd(modcmd_t cmd, void *opaque) 1575 1.18 pgoyette { 1576 1.18 pgoyette int error = 0; 1577 1.48 skrll 1578 1.18 pgoyette #ifdef _MODULE 1579 1.18 pgoyette switch (cmd) { 1580 1.18 pgoyette case MODULE_CMD_INIT: 1581 1.48 skrll error = config_init_component(cfdriver_ioconf_virtio, 1582 1.48 skrll cfattach_ioconf_virtio, cfdata_ioconf_virtio); 1583 1.18 pgoyette break; 1584 1.18 pgoyette case MODULE_CMD_FINI: 1585 1.48 skrll error = config_fini_component(cfdriver_ioconf_virtio, 1586 1.18 pgoyette cfattach_ioconf_virtio, cfdata_ioconf_virtio); 1587 1.18 pgoyette break; 1588 1.18 pgoyette default: 1589 1.18 pgoyette error = ENOTTY; 1590 1.18 pgoyette break; 1591 1.18 pgoyette } 1592 1.18 pgoyette #endif 1593 1.48 skrll 1594 1.48 skrll return error; 1595 1.18 pgoyette } 1596