/*	$NetBSD: libnvmm.c,v 1.8 2019/04/04 17:33:47 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/queue.h>

#include "nvmm.h"

struct nvmm_callbacks __callbacks;

typedef struct __area {
	LIST_ENTRY(__area) list;
	gpaddr_t gpa;
	uintptr_t hva;
	size_t size;
	nvmm_prot_t prot;
} area_t;

typedef LIST_HEAD(, __area) area_list_t;

static int nvmm_fd = -1;

/* -------------------------------------------------------------------------- */

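/*
 * The bookkeeping below mirrors, in userland, the GPA ranges that have been
 * handed to the kernel. __area_isvalid() rejects a new range that overlaps an
 * existing one on the GPA side: the three checks catch a range that starts
 * inside an existing area, ends inside one, or fully contains one. For
 * instance (illustrative values only), with an existing area covering
 * [0x1000, 0x3000), a request for [0x2000, 0x4000) is refused by the first
 * check.
 */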
static bool
__area_isvalid(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size)
{
	area_list_t *areas = mach->areas;
	area_t *ent;

	LIST_FOREACH(ent, areas, list) {
		/* Collision on GPA */
		if (gpa >= ent->gpa && gpa < ent->gpa + ent->size) {
			return false;
		}
		if (gpa + size > ent->gpa &&
		    gpa + size <= ent->gpa + ent->size) {
			return false;
		}
		if (gpa <= ent->gpa && gpa + size >= ent->gpa + ent->size) {
			return false;
		}
	}

	return true;
}

static int
__area_add(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa, size_t size,
    int prot)
{
	area_list_t *areas = mach->areas;
	nvmm_prot_t nprot;
	area_t *area;

	nprot = 0;
	if (prot & PROT_READ)
		nprot |= NVMM_PROT_READ;
	if (prot & PROT_WRITE)
		nprot |= NVMM_PROT_WRITE;
	if (prot & PROT_EXEC)
		nprot |= NVMM_PROT_EXEC;

	if (!__area_isvalid(mach, hva, gpa, size)) {
		errno = EINVAL;
		return -1;
	}

	area = malloc(sizeof(*area));
	if (area == NULL)
		return -1;
	area->gpa = gpa;
	area->hva = hva;
	area->size = size;
	area->prot = nprot;

	LIST_INSERT_HEAD(areas, area, list);

	return 0;
}

static int
__area_delete(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size)
{
	area_list_t *areas = mach->areas;
	area_t *ent, *nxt;

	LIST_FOREACH_SAFE(ent, areas, list, nxt) {
		if (hva == ent->hva && gpa == ent->gpa && size == ent->size) {
			LIST_REMOVE(ent, list);
			free(ent);
			return 0;
		}
	}

	return -1;
}

static void
__area_remove_all(struct nvmm_machine *mach)
{
	area_list_t *areas = mach->areas;
	area_t *ent;

	while ((ent = LIST_FIRST(areas)) != NULL) {
		LIST_REMOVE(ent, list);
		free(ent);
	}

	free(areas);
}

/* -------------------------------------------------------------------------- */

static int
nvmm_init(void)
{
	if (nvmm_fd != -1)
		return 0;
	nvmm_fd = open("/dev/nvmm", O_RDWR);
	if (nvmm_fd == -1)
		return -1;
	return 0;
}

int
nvmm_capability(struct nvmm_capability *cap)
{
	struct nvmm_ioc_capability args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	ret = ioctl(nvmm_fd, NVMM_IOC_CAPABILITY, &args);
	if (ret == -1)
		return -1;

	memcpy(cap, &args.cap, sizeof(args.cap));

	return 0;
}

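/*
 * Example (illustrative sketch, not part of the library; err(3) is from
 * <err.h>): query the hypervisor before building a VM. The returned structure
 * describes the limits advertised by the kernel driver (see struct
 * nvmm_capability in nvmm.h).
 *
 *	struct nvmm_capability cap;
 *
 *	if (nvmm_capability(&cap) == -1)
 *		err(EXIT_FAILURE, "nvmm_capability");
 */
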
int
nvmm_machine_create(struct nvmm_machine *mach)
{
	struct nvmm_ioc_machine_create args;
	area_list_t *areas;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	areas = calloc(1, sizeof(*areas));
	if (areas == NULL)
		return -1;

	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_CREATE, &args);
	if (ret == -1) {
		free(areas);
		return -1;
	}

	memset(mach, 0, sizeof(*mach));
	LIST_INIT(areas);
	mach->areas = areas;
	mach->machid = args.machid;

	return 0;
}

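/*
 * Example (illustrative sketch): create a machine, use it, then tear it down
 * with nvmm_machine_destroy(). On success the kernel-assigned identifier is
 * stored in mach.machid and reused by every other call taking the machine.
 *
 *	struct nvmm_machine mach;
 *
 *	if (nvmm_machine_create(&mach) == -1)
 *		err(EXIT_FAILURE, "nvmm_machine_create");
 *	// ... map guest RAM, create VCPUs, run the guest ...
 *	if (nvmm_machine_destroy(&mach) == -1)
 *		err(EXIT_FAILURE, "nvmm_machine_destroy");
 */
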
int
nvmm_machine_destroy(struct nvmm_machine *mach)
{
	struct nvmm_ioc_machine_destroy args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;

	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_DESTROY, &args);
	if (ret == -1)
		return -1;

	__area_remove_all(mach);

	return 0;
}

int
nvmm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *conf)
{
	struct nvmm_ioc_machine_configure args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.op = op;
	args.conf = conf;

	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_CONFIGURE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_vcpu_create(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
{
	struct nvmm_ioc_vcpu_create args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_CREATE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_vcpu_destroy(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
{
	struct nvmm_ioc_vcpu_destroy args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_DESTROY, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_vcpu_setstate(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    void *state, uint64_t flags)
{
	struct nvmm_ioc_vcpu_setstate args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	args.state = state;
	args.flags = flags;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_SETSTATE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_vcpu_getstate(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    void *state, uint64_t flags)
{
	struct nvmm_ioc_vcpu_getstate args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	args.state = state;
	args.flags = flags;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_GETSTATE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

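/*
 * Example (illustrative sketch): the state buffer and the flags passed to
 * nvmm_vcpu_getstate()/nvmm_vcpu_setstate() are architecture-specific; the
 * x86 structure and NVMM_X64_STATE_* flag names below are taken from the
 * machine-dependent nvmm headers and are assumptions of this sketch. A
 * typical read-modify-write of VCPU 0 looks like:
 *
 *	struct nvmm_x64_state state;
 *
 *	if (nvmm_vcpu_getstate(&mach, 0, &state, NVMM_X64_STATE_GPRS) == -1)
 *		err(EXIT_FAILURE, "nvmm_vcpu_getstate");
 *	// ... modify the general-purpose registers in `state' ...
 *	if (nvmm_vcpu_setstate(&mach, 0, &state, NVMM_X64_STATE_GPRS) == -1)
 *		err(EXIT_FAILURE, "nvmm_vcpu_setstate");
 */
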
int
nvmm_vcpu_inject(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_event *event)
{
	struct nvmm_ioc_vcpu_inject args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	memcpy(&args.event, event, sizeof(args.event));

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_INJECT, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_vcpu_run(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_exit *exit)
{
	struct nvmm_ioc_vcpu_run args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	memset(&args.exit, 0, sizeof(args.exit));

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_RUN, &args);
	if (ret == -1)
		return -1;

	memcpy(exit, &args.exit, sizeof(args.exit));

	return 0;
}

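/*
 * Example (illustrative sketch): create VCPU 0 and run it in a loop. Each
 * return from nvmm_vcpu_run() fills the exit structure with the reason the
 * guest stopped (see struct nvmm_exit in nvmm.h); the emulator dispatches on
 * it, typically to nvmm_assist_io() or nvmm_assist_mem().
 *
 *	struct nvmm_exit exit;
 *
 *	if (nvmm_vcpu_create(&mach, 0) == -1)
 *		err(EXIT_FAILURE, "nvmm_vcpu_create");
 *	for (;;) {
 *		if (nvmm_vcpu_run(&mach, 0, &exit) == -1)
 *			err(EXIT_FAILURE, "nvmm_vcpu_run");
 *		// ... dispatch on the exit reason, break when the guest halts ...
 *	}
 */
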
int
nvmm_gpa_map(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size, int prot)
{
	struct nvmm_ioc_gpa_map args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	ret = __area_add(mach, hva, gpa, size, prot);
	if (ret == -1)
		return -1;

	args.machid = mach->machid;
	args.hva = hva;
	args.gpa = gpa;
	args.size = size;
	args.prot = prot;

	ret = ioctl(nvmm_fd, NVMM_IOC_GPA_MAP, &args);
	if (ret == -1) {
		/* Can't recover. */
		abort();
	}

	return 0;
}

int
nvmm_gpa_unmap(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size)
{
	struct nvmm_ioc_gpa_unmap args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	ret = __area_delete(mach, hva, gpa, size);
	if (ret == -1)
		return -1;

	args.machid = mach->machid;
	args.gpa = gpa;
	args.size = size;

	ret = ioctl(nvmm_fd, NVMM_IOC_GPA_UNMAP, &args);
	if (ret == -1) {
		/* Can't recover. */
		abort();
	}

	return 0;
}

int
nvmm_hva_map(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct nvmm_ioc_hva_map args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.hva = hva;
	args.size = size;

	ret = ioctl(nvmm_fd, NVMM_IOC_HVA_MAP, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_hva_unmap(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct nvmm_ioc_hva_unmap args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.hva = hva;
	args.size = size;

	ret = ioctl(nvmm_fd, NVMM_IOC_HVA_UNMAP, &args);
	if (ret == -1)
		return -1;

	return 0;
}

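/*
 * Example (illustrative sketch): give the guest 1 MB of RAM at guest physical
 * address 0, backed by anonymous host memory. The host range must first be
 * registered with nvmm_hva_map() and is then linked to the GPA range with
 * nvmm_gpa_map().
 *
 *	size_t size = 1024 * 1024;
 *	uintptr_t hva;
 *
 *	hva = (uintptr_t)mmap(NULL, size, PROT_READ|PROT_WRITE,
 *	    MAP_ANON|MAP_PRIVATE, -1, 0);
 *	if ((void *)hva == MAP_FAILED)
 *		err(EXIT_FAILURE, "mmap");
 *	if (nvmm_hva_map(&mach, hva, size) == -1)
 *		err(EXIT_FAILURE, "nvmm_hva_map");
 *	if (nvmm_gpa_map(&mach, hva, 0, size, PROT_READ|PROT_WRITE|PROT_EXEC) == -1)
 *		err(EXIT_FAILURE, "nvmm_gpa_map");
 */
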
/*
 * nvmm_gva_to_gpa(): architecture-specific.
 */

int
nvmm_gpa_to_hva(struct nvmm_machine *mach, gpaddr_t gpa, uintptr_t *hva,
    nvmm_prot_t *prot)
{
	area_list_t *areas = mach->areas;
	area_t *ent;

	LIST_FOREACH(ent, areas, list) {
		if (gpa >= ent->gpa && gpa < ent->gpa + ent->size) {
			*hva = ent->hva + (gpa - ent->gpa);
			*prot = ent->prot;
			return 0;
		}
	}

	errno = ENOENT;
	return -1;
}

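/*
 * Example (illustrative sketch; gpa, buf and len are placeholders): translate
 * a guest physical address into a host pointer, honoring the protection
 * recorded when the range was mapped with nvmm_gpa_map().
 *
 *	uintptr_t hva;
 *	nvmm_prot_t prot;
 *
 *	if (nvmm_gpa_to_hva(&mach, gpa, &hva, &prot) == -1)
 *		err(EXIT_FAILURE, "GPA not mapped");
 *	if (prot & NVMM_PROT_READ)
 *		memcpy(buf, (void *)hva, len);
 */
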
/*
 * nvmm_assist_io(): architecture-specific.
 */

/*
 * nvmm_assist_mem(): architecture-specific.
 */

void
nvmm_callbacks_register(const struct nvmm_callbacks *cbs)
{
	memcpy(&__callbacks, cbs, sizeof(__callbacks));
}
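
/*
 * Example (illustrative sketch, assuming the I/O and memory assist callback
 * fields declared in nvmm.h; my_io_callback and my_mem_callback are
 * placeholder emulator functions): register the callbacks once at startup,
 * before relying on nvmm_assist_io()/nvmm_assist_mem().
 *
 *	static const struct nvmm_callbacks cbs = {
 *		.io = my_io_callback,
 *		.mem = my_mem_callback,
 *	};
 *
 *	nvmm_callbacks_register(&cbs);
 */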