/*      $NetBSD: xen_shm_machdep.c,v 1.12 2018/07/27 09:22:40 maxv Exp $      */

/*
 * Copyright (c) 2006 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_shm_machdep.c,v 1.12 2018/07/27 09:22:40 maxv Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/vmem.h>
#include <sys/kernel.h>
#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/evtchn.h>
#include <xen/xen_shm.h>

/*
 * Helper routines for backend drivers. These implement the functions needed
 * to map a batch of pages from foreign domains into our kernel VM space,
 * do I/O on them, and unmap them again.
 *
 * At boot time, we grab some kernel VM space that we'll use to map the
 * foreign pages. We also maintain a virtual-to-machine mapping table, so we
 * can give the appropriate address back to bus_dma if requested.
 *
 * If no more VM space is available, we return an error. The caller can then
 * register a callback, which will be invoked once the required VM space
 * becomes available again.
 */
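
/*
 * Illustrative sketch of the expected calling sequence in a backend driver.
 * This is only a sketch, not part of this file; the driver-side names
 * (xbd_retry, xbd_softc, grefs, nentries) are hypothetical.
 *
 *	vaddr_t va;
 *	grant_handle_t handles[XENSHM_MAX_PAGES_PER_REQUEST];
 *	int error;
 *
 *	error = xen_shm_map(nentries, domid, grefs, &va, handles, 0);
 *	if (error == ENOMEM) {
 *		(void)xen_shm_callback(xbd_retry, xbd_softc);
 *		return;		(we'll be called back when VA is freed)
 *	}
 *	... do I/O on the nentries pages now mapped at va ...
 *	xen_shm_unmap(va, nentries, handles);
 */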

/*
 * Grab enough VM space to map an entire vbd ring. Xen3 Linux guests seem to
 * eat more pages, so reserve enough for 10 vbd rings.
 */
#define BLKIF_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
#define XENSHM_NPAGES (BLKIF_RING_SIZE * (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1) * 10)
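
/*
 * Back-of-the-envelope check, assuming 4 KiB pages (in which case
 * BLKIF_RING_SIZE works out to 32, and BLKIF_MAX_SEGMENTS_PER_REQUEST
 * is 11):
 *
 *	XENSHM_NPAGES = 32 * (11 + 1) * 10 = 3840 pages, i.e. 15 MiB of VA.
 */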

/* vm space management */
static vmem_t *xen_shm_arena __read_mostly;

/* callbacks are registered in a FIFO list. */
static SIMPLEQ_HEAD(xen_shm_callback_head, xen_shm_callback_entry)
    xen_shm_callbacks;

struct xen_shm_callback_entry {
	SIMPLEQ_ENTRY(xen_shm_callback_entry) xshmc_entries;
	int (*xshmc_callback)(void *); /* our callback */
	void *xshmc_arg; /* cookie passed to the callback */
};

/* a pool of struct xen_shm_callback_entry */
static struct pool xen_shm_callback_pool;

#ifdef DEBUG
/* for ratecheck(9) */
static struct timeval xen_shm_errintvl = { 60, 0 };  /* a minute, each */
#endif

void
xen_shm_init(void)
{
	vaddr_t xen_shm_base_address;
	vaddr_t xen_shm_end_address;
	u_long xen_shm_base_address_pg;
	vsize_t xen_shm_size;

	SIMPLEQ_INIT(&xen_shm_callbacks);
	pool_init(&xen_shm_callback_pool, sizeof(struct xen_shm_callback_entry),
	    0, 0, 0, "xshmc", NULL, IPL_VM);
	/* ensure we'll always get items */
	if (pool_prime(&xen_shm_callback_pool,
	    PAGE_SIZE / sizeof(struct xen_shm_callback_entry)) != 0) {
		panic("xen_shm_init can't prime pool");
	}

	xen_shm_size = (XENSHM_NPAGES * PAGE_SIZE);

	xen_shm_base_address = uvm_km_alloc(kernel_map, xen_shm_size, 0,
	    UVM_KMF_VAONLY);
	if (xen_shm_base_address == 0) {
		panic("xen_shm_init no VM space");
	}
	xen_shm_end_address = xen_shm_base_address + xen_shm_size;
	xen_shm_base_address_pg = xen_shm_base_address >> PAGE_SHIFT;
	/* The arena is managed in units of pages, hence the shifts. */
	xen_shm_arena = vmem_create("xen_shm", xen_shm_base_address_pg,
	    (xen_shm_end_address >> PAGE_SHIFT) - 1 - xen_shm_base_address_pg,
	    1, NULL, NULL, NULL, 1, VM_NOSLEEP, IPL_VM);
	if (xen_shm_arena == NULL) {
		panic("xen_shm_init no arena");
	}
}

int
xen_shm_map(int nentries, int domid, grant_ref_t *grefp, vaddr_t *vap,
    grant_handle_t *handlep, int flags)
{
	gnttab_map_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
	vmem_addr_t new_va_pg;
	vaddr_t new_va;
	int ret, i, s;

#ifdef DIAGNOSTIC
	if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
		panic("xen_shm_map: %d entries", nentries);
	}
#endif

	/* XXXSMP */
	s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */

	/*
	 * If a driver is already waiting for resources, don't try to allocate
	 * yet. This avoids a flood of small requests stalling large ones.
	 */
	if (__predict_false(SIMPLEQ_FIRST(&xen_shm_callbacks) != NULL) &&
	    (flags & XSHM_CALLBACK) == 0) {
		splx(s);
#ifdef DEBUG
		static struct timeval lasttime;
		if (ratecheck(&lasttime, &xen_shm_errintvl))
			printf("xen_shm_map: ENOMEM (callbacks pending)\n");
#endif
		return ENOMEM;
	}

	/* Allocate the needed virtual space. */
	if (vmem_alloc(xen_shm_arena, nentries,
	    VM_INSTANTFIT | VM_NOSLEEP, &new_va_pg) != 0) {
		splx(s);
#ifdef DEBUG
		static struct timeval lasttime;
		if (ratecheck(&lasttime, &xen_shm_errintvl))
			printf("xen_shm_map: ENOMEM (out of VA space)\n");
#endif
		return ENOMEM;
	}
	splx(s);

	new_va = new_va_pg << PAGE_SHIFT;
	for (i = 0; i < nentries; i++) {
		op[i].host_addr = new_va + i * PAGE_SIZE;
		op[i].dom = domid;
		op[i].ref = grefp[i];
		op[i].flags = GNTMAP_host_map |
		    ((flags & XSHM_RO) ? GNTMAP_readonly : 0);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op, nentries);
	if (__predict_false(ret)) {
		panic("xen_shm_map: HYPERVISOR_grant_table_op failed");
	}

	for (i = 0; i < nentries; i++) {
		if (__predict_false(op[i].status)) {
			gnttab_unmap_grant_ref_t uop;
			int j;

			/*
			 * This entry failed to map. Unmap the entries that
			 * did succeed and release the VA range, so the error
			 * path doesn't leak grant handles or arena space.
			 */
			for (j = 0; j < nentries; j++) {
				if (op[j].status != 0)
					continue;
				uop.host_addr = new_va + j * PAGE_SIZE;
				uop.dev_bus_addr = 0;
				uop.handle = op[j].handle;
				if (HYPERVISOR_grant_table_op(
				    GNTTABOP_unmap_grant_ref, &uop, 1) != 0)
					panic("xen_shm_map: cleanup failed");
			}
			s = splvm(); /* XXXSMP */
			vmem_free(xen_shm_arena, new_va_pg, nentries);
			splx(s);
			return op[i].status;
		}
		handlep[i] = op[i].handle;
	}

	*vap = new_va;
	return 0;
}

void
xen_shm_unmap(vaddr_t va, int nentries, grant_handle_t *handlep)
{
	gnttab_unmap_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
	struct xen_shm_callback_entry *xshmc;
	int ret, i, s;

#ifdef DIAGNOSTIC
	if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
		panic("xen_shm_unmap: %d entries", nentries);
	}
#endif

	for (i = 0; i < nentries; i++) {
		op[i].host_addr = va + i * PAGE_SIZE;
		op[i].dev_bus_addr = 0;
		op[i].handle = handlep[i];
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
	    op, nentries);
	if (__predict_false(ret)) {
		panic("xen_shm_unmap: unmap failed");
	}

	va = va >> PAGE_SHIFT; /* now a page number, as the arena expects */

	/* XXXSMP */
	s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */

	vmem_free(xen_shm_arena, va, nentries);
	/*
	 * The free above may have made enough VA space available for waiting
	 * drivers: run the pending callbacks in FIFO order, stopping at the
	 * first one that still can't make progress.
	 */
	while (__predict_false((xshmc = SIMPLEQ_FIRST(&xen_shm_callbacks))
	    != NULL)) {
		SIMPLEQ_REMOVE_HEAD(&xen_shm_callbacks, xshmc_entries);
		splx(s);
		if (xshmc->xshmc_callback(xshmc->xshmc_arg) == 0) {
			/* callback succeeded */
			s = splvm(); /* XXXSMP */
			pool_put(&xen_shm_callback_pool, xshmc);
		} else {
			/* callback failed, probably out of resources */
			s = splvm(); /* XXXSMP */
			SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc,
			    xshmc_entries);
			break;
		}
	}

	splx(s);
}

/*
 * Register a function (with its argument) to be called once VM space is
 * freed by xen_shm_unmap(). The function must return 0 if it could make
 * progress, nonzero to stay queued. Returns ENOMEM if no callback entry
 * can be allocated.
 */
int
xen_shm_callback(int (*callback)(void *), void *arg)
{
	struct xen_shm_callback_entry *xshmc;
	int s;

	s = splvm(); /* XXXSMP */
	xshmc = pool_get(&xen_shm_callback_pool, PR_NOWAIT);
	if (xshmc == NULL) {
		splx(s);
		return ENOMEM;
	}
	xshmc->xshmc_arg = arg;
	xshmc->xshmc_callback = callback;
	SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc, xshmc_entries);
	splx(s);
	return 0;
}
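
/*
 * Sketch of a callback as a consumer might write it (all names here are
 * hypothetical). It retries the deferred mapping with XSHM_CALLBACK set,
 * so xen_shm_map() doesn't refuse the attempt while callbacks are queued,
 * and reports via its return value whether it made progress:
 *
 *	static int
 *	xbd_retry(void *arg)
 *	{
 *		struct xbd_req *req = arg;
 *
 *		if (xen_shm_map(req->nentries, req->domid, req->grefs,
 *		    &req->va, req->handles, XSHM_CALLBACK) != 0)
 *			return -1;	(still no space: stay queued)
 *		(resume the deferred I/O here)
 *		return 0;		(success: entry can be freed)
 *	}
 */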