/*      $NetBSD: xen_shm_machdep.c,v 1.9 2011/07/31 18:00:54 jym Exp $      */

/*
 * Copyright (c) 2006 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_shm_machdep.c,v 1.9 2011/07/31 18:00:54 jym Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/vmem.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/evtchn.h>
#include <xen/xen_shm.h>

/*
 * Helper routines for the backend drivers. These implement the functions
 * needed to map a batch of pages from foreign domains into our kernel VM
 * space, do I/O to them, and unmap them.
 *
 * At boot time, we grab some kernel VM space that we'll use to map the
 * foreign pages. We also maintain a virtual-to-machine mapping table, so
 * we can give back the appropriate address to bus_dma if requested.
 * If no more VM space is available, we return an error. The caller can then
 * register a callback which will be called when the required VM space is
 * available.
 */
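
/*
 * Example usage, as a sketch only: nsegs, domid, gntrefs, sc and
 * driver_retry below are hypothetical driver-side names; xen_shm_map(),
 * xen_shm_unmap() and xen_shm_callback() are the real entry points.
 *
 *	vaddr_t va;
 *	grant_handle_t handles[XENSHM_MAX_PAGES_PER_REQUEST];
 *	int error;
 *
 *	error = xen_shm_map(nsegs, domid, gntrefs, &va, handles, 0);
 *	if (error == ENOMEM) {
 *		(out of VM space: queue the request; driver_retry will
 *		be called back when space is freed)
 *		xen_shm_callback(driver_retry, sc);
 *		return;
 *	}
 *	(do I/O to the nsegs pages now mapped at va, then:)
 *	xen_shm_unmap(va, nsegs, handles);
 */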

/* pointers to our VM space */
static vaddr_t xen_shm_base_address;
static u_long xen_shm_base_address_pg;
static vaddr_t xen_shm_end_address;

/* Grab enough VM space to map an entire vbd ring. */
/* Xen3 Linux guests seem to use more pages; provide enough for 10 vbd rings. */
#define BLKIF_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
#define XENSHM_NPAGES (BLKIF_RING_SIZE * (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1) * 10)

static vsize_t xen_shm_size = (XENSHM_NPAGES * PAGE_SIZE);

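/*
 * Back-of-the-envelope sizing, for illustration only (it assumes 4KB
 * pages, a 32-entry blkif ring and BLKIF_MAX_SEGMENTS_PER_REQUEST == 11,
 * the usual x86 values; the headers are authoritative):
 * XENSHM_NPAGES == 32 * (11 + 1) * 10 == 3840 pages, i.e. 15MB of KVA.
 */
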
/* vm space management */
static vmem_t *xen_shm_arena;

/* callbacks are registered in a FIFO list. */

static SIMPLEQ_HEAD(xen_shm_callback_head, xen_shm_callback_entry)
    xen_shm_callbacks;
struct xen_shm_callback_entry {
	SIMPLEQ_ENTRY(xen_shm_callback_entry) xshmc_entries;
	int (*xshmc_callback)(void *); /* our callback */
	void *xshmc_arg; /* cookie passed to the callback */
};
/* a pool of struct xen_shm_callback_entry */
static struct pool xen_shm_callback_pool;

#ifdef DEBUG
/* for ratecheck(9): report at most once a minute */
static struct timeval xen_shm_errintvl = { 60, 0 };
#endif

void
xen_shm_init(void)
{
	SIMPLEQ_INIT(&xen_shm_callbacks);
	pool_init(&xen_shm_callback_pool, sizeof(struct xen_shm_callback_entry),
	    0, 0, 0, "xshmc", NULL, IPL_VM);
	/* ensure we'll always get items */
	if (pool_prime(&xen_shm_callback_pool,
	    PAGE_SIZE / sizeof(struct xen_shm_callback_entry)) != 0) {
		panic("xen_shm_init can't prime pool");
	}

	xen_shm_base_address = uvm_km_alloc(kernel_map, xen_shm_size, 0,
	    UVM_KMF_VAONLY);
	if (xen_shm_base_address == 0) {
		panic("xen_shm_init no VM space");
	}
	xen_shm_end_address = xen_shm_base_address + xen_shm_size;
	xen_shm_base_address_pg = xen_shm_base_address >> PAGE_SHIFT;
	xen_shm_arena = vmem_create("xen_shm",
	    xen_shm_base_address_pg,
	    (xen_shm_end_address >> PAGE_SHIFT) - 1 - xen_shm_base_address_pg,
	    1, NULL, NULL, NULL, 1, VM_NOSLEEP, IPL_VM);
	if (xen_shm_arena == NULL) {
		panic("xen_shm_init no arena");
	}
}

int
xen_shm_map(int nentries, int domid, grant_ref_t *grefp, vaddr_t *vap,
    grant_handle_t *handlep, int flags)
{
	int s, i;
	vaddr_t new_va;
	u_long new_va_pg;
	int err;
	gnttab_map_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];

#ifdef DIAGNOSTIC
	if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
		printf("xen_shm_map: %d entries\n", nentries);
		panic("xen_shm_map");
	}
#endif
	s = splvm(); /* splvm is the lowest IPL blocking disk and net interrupts */
	/*
	 * If a driver is already waiting for resources, don't try to
	 * allocate yet. This avoids a flood of small requests stalling
	 * large ones.
	 */
	if (__predict_false(SIMPLEQ_FIRST(&xen_shm_callbacks) != NULL) &&
	    (flags & XSHM_CALLBACK) == 0) {
#ifdef DEBUG
		static struct timeval lasttime;
#endif
		splx(s);
#ifdef DEBUG
		if (ratecheck(&lasttime, &xen_shm_errintvl))
			printf("xen_shm_map: ENOMEM1\n");
#endif
		return ENOMEM;
	}
	/* allocate the needed virtual space */
	new_va_pg = vmem_alloc(xen_shm_arena, nentries,
	    VM_INSTANTFIT | VM_NOSLEEP);
	if (new_va_pg == 0) {
#ifdef DEBUG
		static struct timeval lasttime;
#endif
		splx(s);
#ifdef DEBUG
		if (ratecheck(&lasttime, &xen_shm_errintvl))
			printf("xen_shm_map: ENOMEM\n");
#endif
		return ENOMEM;
	}
	splx(s);

	new_va = new_va_pg << PAGE_SHIFT;
	for (i = 0; i < nentries; i++) {
		op[i].host_addr = new_va + i * PAGE_SIZE;
		op[i].dom = domid;
		op[i].ref = grefp[i];
		op[i].flags = GNTMAP_host_map |
		    ((flags & XSHM_RO) ? GNTMAP_readonly : 0);
	}
	err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op, nentries);
	if (__predict_false(err))
		panic("xen_shm_map: HYPERVISOR_grant_table_op failed");
	for (i = 0; i < nentries; i++) {
		if (__predict_false(op[i].status)) {
			/*
			 * This entry failed to map: undo the entries that
			 * did map and release the VA space, so we don't
			 * leak grant handles or arena pages on error.
			 */
			gnttab_unmap_grant_ref_t uop[XENSHM_MAX_PAGES_PER_REQUEST];
			int j, n = 0;

			for (j = 0; j < nentries; j++) {
				if (op[j].status != 0)
					continue;
				uop[n].host_addr = new_va + j * PAGE_SIZE;
				uop[n].dev_bus_addr = 0;
				uop[n].handle = op[j].handle;
				n++;
			}
			if (n > 0 && HYPERVISOR_grant_table_op(
			    GNTTABOP_unmap_grant_ref, uop, n) != 0)
				panic("xen_shm_map: cleanup unmap failed");
			s = splvm();
			vmem_free(xen_shm_arena, new_va_pg, nentries);
			splx(s);
			return op[i].status;
		}
		handlep[i] = op[i].handle;
	}
	*vap = new_va;
	return 0;
}

void
xen_shm_unmap(vaddr_t va, int nentries, grant_handle_t *handlep)
{
	gnttab_unmap_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
	int ret;
	int i;
	int s;
	struct xen_shm_callback_entry *xshmc;

#ifdef DIAGNOSTIC
	if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
		printf("xen_shm_unmap: %d entries\n", nentries);
		panic("xen_shm_unmap");
	}
#endif

	for (i = 0; i < nentries; i++) {
		op[i].host_addr = va + i * PAGE_SIZE;
		op[i].dev_bus_addr = 0;
		op[i].handle = handlep[i];
	}
	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
	    op, nentries);
	if (__predict_false(ret))
		panic("xen_shm_unmap: unmap failed");
	va = va >> PAGE_SHIFT;
	s = splvm(); /* splvm is the lowest IPL blocking disk and net interrupts */
	vmem_free(xen_shm_arena, va, nentries);
	while (__predict_false((xshmc = SIMPLEQ_FIRST(&xen_shm_callbacks))
	    != NULL)) {
		SIMPLEQ_REMOVE_HEAD(&xen_shm_callbacks, xshmc_entries);
		splx(s);
		if (xshmc->xshmc_callback(xshmc->xshmc_arg) == 0) {
			/* callback succeeded */
			s = splvm();
			pool_put(&xen_shm_callback_pool, xshmc);
		} else {
			/* callback failed, probably out of resources */
			s = splvm();
			SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc,
			    xshmc_entries);
			break;
		}
	}
	splx(s);
}

int
xen_shm_callback(int (*callback)(void *), void *arg)
{
	struct xen_shm_callback_entry *xshmc;
	int s;

	s = splvm();
	xshmc = pool_get(&xen_shm_callback_pool, PR_NOWAIT);
	if (xshmc == NULL) {
		splx(s);
		return ENOMEM;
	}
	xshmc->xshmc_arg = arg;
	xshmc->xshmc_callback = callback;
	SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc, xshmc_entries);
	splx(s);
	return 0;
}
    255