/* $NetBSD: libnvmm.c,v 1.20 2021/04/06 08:40:17 reinoud Exp $ */

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <machine/vmparam.h>

#include "nvmm.h"

static struct nvmm_capability __capability;

#ifdef __x86_64__
#include "libnvmm_x86.c"
#endif

typedef struct __area {
        LIST_ENTRY(__area) list;
        gpaddr_t gpa;
        uintptr_t hva;
        size_t size;
        nvmm_prot_t prot;
} area_t;

typedef LIST_HEAD(, __area) area_list_t;

static int nvmm_fd = -1;

/* -------------------------------------------------------------------------- */

static bool
__area_isvalid(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size)
{
        area_list_t *areas = mach->areas;
        area_t *ent;

        LIST_FOREACH(ent, areas, list) {
                /* Collision on GPA */
                if (gpa >= ent->gpa && gpa < ent->gpa + ent->size) {
                        return false;
                }
                if (gpa + size > ent->gpa &&
                    gpa + size <= ent->gpa + ent->size) {
                        return false;
                }
                if (gpa <= ent->gpa && gpa + size >= ent->gpa + ent->size) {
                        return false;
                }
        }

        return true;
}
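
/*
 * The three checks above reject, in order, a new area whose start falls
 * inside an existing one, one whose end falls inside an existing one, and
 * one that fully contains an existing one.  For example, with an area
 * already registered at GPA [0x1000, 0x3000), new areas starting at
 * 0x2000, ending at 0x2000, or spanning [0x0, 0x4000) are all refused.
 * The list only mirrors the kernel's GPA->HVA view, so that
 * nvmm_gpa_to_hva() below can translate addresses without an ioctl.
 */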

static int
__area_add(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa, size_t size,
    int prot)
{
        area_list_t *areas = mach->areas;
        nvmm_prot_t nprot;
        area_t *area;

        nprot = 0;
        if (prot & PROT_READ)
                nprot |= NVMM_PROT_READ;
        if (prot & PROT_WRITE)
                nprot |= NVMM_PROT_WRITE;
        if (prot & PROT_EXEC)
                nprot |= NVMM_PROT_EXEC;

        if (!__area_isvalid(mach, hva, gpa, size)) {
                errno = EINVAL;
                return -1;
        }

        area = malloc(sizeof(*area));
        if (area == NULL)
                return -1;
        area->gpa = gpa;
        area->hva = hva;
        area->size = size;
        area->prot = nprot;

        LIST_INSERT_HEAD(areas, area, list);

        return 0;
}

static int
__area_delete(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size)
{
        area_list_t *areas = mach->areas;
        area_t *ent, *nxt;

        LIST_FOREACH_SAFE(ent, areas, list, nxt) {
                if (hva == ent->hva && gpa == ent->gpa && size == ent->size) {
                        LIST_REMOVE(ent, list);
                        free(ent);
                        return 0;
                }
        }

        return -1;
}

static void
__area_remove_all(struct nvmm_machine *mach)
{
        area_list_t *areas = mach->areas;
        area_t *ent;

        while ((ent = LIST_FIRST(areas)) != NULL) {
                LIST_REMOVE(ent, list);
                free(ent);
        }

        free(areas);
}

/* -------------------------------------------------------------------------- */

int
nvmm_init(void)
{
        if (nvmm_fd != -1)
                return 0;
        nvmm_fd = open("/dev/nvmm", O_RDONLY | O_CLOEXEC);
        if (nvmm_fd == -1)
                return -1;
        if (nvmm_capability(&__capability) == -1) {
                close(nvmm_fd);
                nvmm_fd = -1;
                return -1;
        }
        if (__capability.version != NVMM_KERN_VERSION) {
                close(nvmm_fd);
                nvmm_fd = -1;
                errno = EPROGMISMATCH;
                return -1;
        }

        return 0;
}

int
nvmm_root_init(void)
{
        if (nvmm_fd != -1)
                return 0;
        nvmm_fd = open("/dev/nvmm", O_WRONLY | O_CLOEXEC);
        if (nvmm_fd == -1)
                return -1;
        if (nvmm_capability(&__capability) == -1) {
                close(nvmm_fd);
                nvmm_fd = -1;
                return -1;
        }
        if (__capability.version != NVMM_KERN_VERSION) {
                close(nvmm_fd);
                nvmm_fd = -1;
                errno = EPROGMISMATCH;
                return -1;
        }

        return 0;
}

int
nvmm_capability(struct nvmm_capability *cap)
{
        struct nvmm_ioc_capability args;
        int ret;

        ret = ioctl(nvmm_fd, NVMM_IOC_CAPABILITY, &args);
        if (ret == -1)
                return -1;

        memcpy(cap, &args.cap, sizeof(args.cap));

        return 0;
}

int
nvmm_machine_create(struct nvmm_machine *mach)
{
        struct nvmm_ioc_machine_create args;
        struct nvmm_comm_page **pages;
        area_list_t *areas;
        int ret;

        areas = calloc(1, sizeof(*areas));
        if (areas == NULL)
                return -1;

        pages = calloc(__capability.max_vcpus, sizeof(*pages));
        if (pages == NULL) {
                free(areas);
                return -1;
        }

        ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_CREATE, &args);
        if (ret == -1) {
                /* Release both local allocations on failure. */
                free(pages);
                free(areas);
                return -1;
        }

        LIST_INIT(areas);

        memset(mach, 0, sizeof(*mach));
        mach->machid = args.machid;
        mach->pages = pages;
        mach->areas = areas;

        return 0;
}

int
nvmm_machine_destroy(struct nvmm_machine *mach)
{
        struct nvmm_ioc_machine_destroy args;
        int ret;

        args.machid = mach->machid;

        ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_DESTROY, &args);
        if (ret == -1)
                return -1;

        __area_remove_all(mach);
        free(mach->pages);

        return 0;
}

int
nvmm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *conf)
{
        struct nvmm_ioc_machine_configure args;
        int ret;

        args.machid = mach->machid;
        args.op = op;
        args.conf = conf;

        ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_CONFIGURE, &args);
        if (ret == -1)
                return -1;

        return 0;
}

int
nvmm_vcpu_create(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_vcpu *vcpu)
{
        struct nvmm_ioc_vcpu_create args;
        struct nvmm_comm_page *comm;
        int ret;

        args.machid = mach->machid;
        args.cpuid = cpuid;

        ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_CREATE, &args);
        if (ret == -1)
                return -1;

        /*
         * Map the per-VCPU comm page shared with the kernel; the VCPU
         * state and pending events are exchanged through it.
         */
        comm = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FILE,
            nvmm_fd, NVMM_COMM_OFF(mach->machid, cpuid));
        if (comm == MAP_FAILED)
                return -1;

        mach->pages[cpuid] = comm;

        vcpu->cpuid = cpuid;
        vcpu->state = &comm->state;
        vcpu->event = &comm->event;
        vcpu->stop = &comm->stop;
        vcpu->exit = malloc(sizeof(*vcpu->exit));

        return 0;
}

int
nvmm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
        struct nvmm_ioc_vcpu_destroy args;
        struct nvmm_comm_page *comm;
        int ret;

        args.machid = mach->machid;
        args.cpuid = vcpu->cpuid;

        ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_DESTROY, &args);
        if (ret == -1)
                return -1;

        comm = mach->pages[vcpu->cpuid];
        munmap(comm, PAGE_SIZE);
        free(vcpu->exit);

        return 0;
}

int
nvmm_vcpu_configure(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
    uint64_t op, void *conf)
{
        struct nvmm_ioc_vcpu_configure args;
        int ret;

        switch (op) {
        case NVMM_VCPU_CONF_CALLBACKS:
                /* Handled entirely in userland, no kernel round trip. */
                memcpy(&vcpu->cbs, conf, sizeof(vcpu->cbs));
                return 0;
        }

        args.machid = mach->machid;
        args.cpuid = vcpu->cpuid;
        args.op = op;
        args.conf = conf;

        ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_CONFIGURE, &args);
        if (ret == -1)
                return -1;

        return 0;
}

int
nvmm_vcpu_setstate(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
    uint64_t flags)
{
        struct nvmm_comm_page *comm;

        /*
         * The caller wrote the state directly into the comm page (through
         * vcpu->state); just mark which parts the kernel must commit on
         * the next run, and which parts the page now caches.
         */
        comm = mach->pages[vcpu->cpuid];
        comm->state_commit |= flags;
        comm->state_cached |= flags;

        return 0;
}

int
nvmm_vcpu_getstate(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
    uint64_t flags)
{
        struct nvmm_ioc_vcpu_getstate args;
        struct nvmm_comm_page *comm;
        int ret;

        comm = mach->pages[vcpu->cpuid];

        if (__predict_true((flags & ~comm->state_cached) == 0)) {
                /* Everything requested is already cached in the comm page. */
                return 0;
        }
        comm->state_wanted = flags & ~comm->state_cached;

        args.machid = mach->machid;
        args.cpuid = vcpu->cpuid;

        ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_GETSTATE, &args);
        if (ret == -1)
                return -1;

        return 0;
}

int
nvmm_vcpu_inject(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
        struct nvmm_comm_page *comm;

        /* The event itself was filled in by the caller through vcpu->event. */
        comm = mach->pages[vcpu->cpuid];
        comm->event_commit = true;

        return 0;
}

int
nvmm_vcpu_run(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
        struct nvmm_ioc_vcpu_run args;
        int ret;

        args.machid = mach->machid;
        args.cpuid = vcpu->cpuid;
        memset(&args.exit, 0, sizeof(args.exit));

        ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_RUN, &args);
        if (ret == -1)
                return -1;

        /* No comm support yet, just copy. */
        memcpy(vcpu->exit, &args.exit, sizeof(args.exit));

        return 0;
}

int
nvmm_gpa_map(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size, int prot)
{
        struct nvmm_ioc_gpa_map args;
        int ret;

        ret = __area_add(mach, hva, gpa, size, prot);
        if (ret == -1)
                return -1;

        args.machid = mach->machid;
        args.hva = hva;
        args.gpa = gpa;
        args.size = size;
        args.prot = prot;

        ret = ioctl(nvmm_fd, NVMM_IOC_GPA_MAP, &args);
        if (ret == -1) {
                /* Can't recover. */
                abort();
        }

        return 0;
}

int
nvmm_gpa_unmap(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size)
{
        struct nvmm_ioc_gpa_unmap args;
        int ret;

        ret = __area_delete(mach, hva, gpa, size);
        if (ret == -1)
                return -1;

        args.machid = mach->machid;
        args.gpa = gpa;
        args.size = size;

        ret = ioctl(nvmm_fd, NVMM_IOC_GPA_UNMAP, &args);
        if (ret == -1) {
                /* Can't recover. */
                abort();
        }

        return 0;
}

int
nvmm_hva_map(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
        struct nvmm_ioc_hva_map args;
        int ret;

        args.machid = mach->machid;
        args.hva = hva;
        args.size = size;

        ret = ioctl(nvmm_fd, NVMM_IOC_HVA_MAP, &args);
        if (ret == -1)
                return -1;

        return 0;
}

int
nvmm_hva_unmap(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
        struct nvmm_ioc_hva_unmap args;
        int ret;

        args.machid = mach->machid;
        args.hva = hva;
        args.size = size;

        ret = ioctl(nvmm_fd, NVMM_IOC_HVA_UNMAP, &args);
        if (ret == -1)
                return -1;

        return 0;
}

/*
 * nvmm_gva_to_gpa(): architecture-specific.
 */

int
nvmm_gpa_to_hva(struct nvmm_machine *mach, gpaddr_t gpa, uintptr_t *hva,
    nvmm_prot_t *prot)
{
        area_list_t *areas = mach->areas;
        area_t *ent;

        LIST_FOREACH(ent, areas, list) {
                if (gpa >= ent->gpa && gpa < ent->gpa + ent->size) {
                        *hva = ent->hva + (gpa - ent->gpa);
                        *prot = ent->prot;
                        return 0;
                }
        }

        errno = ENOENT;
        return -1;
}

/*
 * nvmm_assist_io(): architecture-specific.
 */

/*
 * nvmm_assist_mem(): architecture-specific.
 */

int
nvmm_ctl(int op, void *data, size_t size)
{
        struct nvmm_ioc_ctl args;
        int ret;

        args.op = op;
        args.data = data;
        args.size = size;

        ret = ioctl(nvmm_fd, NVMM_IOC_CTL, &args);
        if (ret == -1)
                return -1;

        return 0;
}

int
nvmm_vcpu_stop(struct nvmm_vcpu *vcpu)
{

        /* Request, through the comm page, that the VCPU stop running. */
        *vcpu->stop = 1;

        return 0;
}
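
/*
 * Usage sketch.  Only the functions defined in this file are guaranteed
 * here; NVMM_X64_STATE_ALL, the vcpu.exit->reason field and the guest
 * memory setup come from nvmm.h and the x86 backend, and are shown purely
 * to illustrate the expected call order.
 *
 *	struct nvmm_machine mach;
 *	struct nvmm_vcpu vcpu;
 *	void *mem;
 *
 *	if (nvmm_init() == -1)
 *		err(EXIT_FAILURE, "nvmm_init");
 *	if (nvmm_machine_create(&mach) == -1)
 *		err(EXIT_FAILURE, "nvmm_machine_create");
 *	if (nvmm_vcpu_create(&mach, 0, &vcpu) == -1)
 *		err(EXIT_FAILURE, "nvmm_vcpu_create");
 *
 *	// Back one page of guest memory: reserve an HVA range, register
 *	// it with the kernel, then link it to a GPA range.
 *	mem = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE,
 *	    MAP_ANON|MAP_PRIVATE, -1, 0);
 *	nvmm_hva_map(&mach, (uintptr_t)mem, PAGE_SIZE);
 *	nvmm_gpa_map(&mach, (uintptr_t)mem, 0x1000, PAGE_SIZE,
 *	    PROT_READ|PROT_WRITE|PROT_EXEC);
 *
 *	// Fill vcpu.state, commit it, then run until an exit that the
 *	// emulator must handle.
 *	nvmm_vcpu_setstate(&mach, &vcpu, NVMM_X64_STATE_ALL);
 *	for (;;) {
 *		if (nvmm_vcpu_run(&mach, &vcpu) == -1)
 *			err(EXIT_FAILURE, "nvmm_vcpu_run");
 *		// Dispatch on vcpu.exit->reason (I/O assist, memory
 *		// assist, halt, ...); break out when the guest shuts down.
 *	}
 *
 *	nvmm_vcpu_destroy(&mach, &vcpu);
 *	nvmm_machine_destroy(&mach);
 */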