/*	$NetBSD: xen_bus_dma.c,v 1.17 2010/02/12 01:55:46 jym Exp $	*/
/*	NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.17 2010/02/12 01:55:46 jym Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <machine/bus.h>
#include <machine/bus_private.h>

#include <uvm/uvm_extern.h>

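/*
 * avail_end is set up by machine-dependent boot code; it bounds the
 * physical addresses that page allocations below may use.
 */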
extern paddr_t avail_end;

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order = -1;
	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
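
/*
 * Example, assuming PAGE_SHIFT == 12: get_order(PAGE_SIZE) == 0,
 * get_order(3 * PAGE_SIZE) == 2 and get_order(4 * PAGE_SIZE) == 2,
 * i.e. the smallest order such that (1 << order) pages hold "size" bytes.
 */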

static int
_xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
    struct pglist *mlistp, int flags, bus_addr_t low, bus_addr_t high)
{
	int order, i;
	unsigned long npagesreq, npages, mfn;
	bus_addr_t pa;
	struct vm_page *pg, *pgnext;
	int s, error;
	struct xen_memory_reservation res;

	/*
	 * When requesting a contiguous memory region, the hypervisor will
	 * return a memory range aligned on its size. This will automagically
	 * handle "boundary", but the only way to enforce "alignment"
	 * is to request a memory region of size max(alignment, size).
	 */
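	/*
	 * For example, size = 3 pages and alignment = 4 pages give
	 * order = max(2, 2) = 2, hence npages = 4 but npagesreq = 3;
	 * the extra page is handed back to UVM once the new extent
	 * has been mapped in below.
	 */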
	order = max(get_order(size), get_order(alignment));
	npages = (1 << order);
	npagesreq = (size >> PAGE_SHIFT);
	KASSERT(npages >= npagesreq);

	/* get npages from UVM, and give them back to the hypervisor */
	error = uvm_pglistalloc(((psize_t)npages) << PAGE_SHIFT,
	    0, avail_end, 0, 0, mlistp, npages, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.queue.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
		mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
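		/*
		 * Invalidate the physical-to-machine entry before this
		 * frame is returned to Xen, so the P2M table never points
		 * at a machine page the domain no longer owns.
		 */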
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = INVALID_P2M_ENTRY;
		xenguest_handle(res.extent_start) = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &res)
		    != 1) {
#ifdef DEBUG
			printf("xen_alloc_contig: XENMEM_decrease_reservation "
			    "failed!\n");
#endif
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;

			error = ENOMEM;
			goto failed;
		}
	}
	/* Get the new contiguous memory extent */
	xenguest_handle(res.extent_start) = &mfn;
	res.nr_extents = 1;
	res.extent_order = order;
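	/*
	 * address_bits caps the machine address of the new extent:
	 * get_order(high) + PAGE_SHIFT is the number of bits needed to
	 * address "high", which keeps the extent below that limit.
	 */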
	res.address_bits = get_order(high) + PAGE_SHIFT;
	res.domid = DOMID_SELF;
	error = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res);
	if (error != 1) {
#ifdef DEBUG
		printf("xen_alloc_contig: XENMEM_increase_reservation "
		    "failed: %d (order %d address_bits %d)\n",
		    error, order, res.address_bits);
#endif
		error = ENOMEM;
		pg = NULL;
		goto failed;
	}
	s = splvm();
	/* Map the new extent in place of the old pages */
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.queue.tqe_next;
		pa = VM_PAGE_TO_PHYS(pg);
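		/*
		 * Point the P2M entry at the i-th frame of the new extent
		 * and queue the matching machine-to-physical update for
		 * the hypervisor.
		 */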
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn+i;
		xpq_queue_machphys_update(((paddr_t)(mfn+i)) << PAGE_SHIFT, pa);
		/* while here, give extra pages back to UVM */
		if (i >= npagesreq) {
			TAILQ_REMOVE(mlistp, pg, pageq.queue);
			uvm_pagefree(pg);
		}
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	splx(s);
	return 0;

failed:
	/*
	 * Attempt to recover from a failed decrease or increase reservation:
	 * if decrease_reservation failed, we have not given all pages
	 * back to Xen yet; return the remaining ones to UVM, and get the
	 * missing pages back from Xen.
	 * if increase_reservation failed, we expect pg to be NULL and we
	 * just get back the missing pages from Xen one by one.
	 */
	/* give back remaining pages to UVM */
	for (; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* replace the pages that we already gave to Xen */
	s = splvm();
	for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		xenguest_handle(res.extent_start) = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
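		/* Ask for one replacement page below 4 GB (32 address bits). */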
		res.address_bits = 32;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res)
		    < 0) {
			printf("xen_alloc_contig: recovery "
			    "XENMEM_increase_reservation failed!\n");
			break;
		}
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
		xpq_queue_machphys_update(((paddr_t)mfn) << PAGE_SHIFT, pa);
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	splx(s);
	return error;
}


/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 * We need our own version to deal with physical vs machine addresses.
 */
int
_xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
	bus_addr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
	int doingrealloc = 0;

	/* Always round the size. */
	size = round_page(size);

	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (boundary != 0 && boundary < size)
		return (EINVAL);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, 0, avail_end, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);
again:

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	curaddr = lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
	if (curaddr < low || curaddr >= high)
		goto badaddr;
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.queue.tqe_next;
	if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
		goto dorealloc;

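	/*
	 * Walk the remaining pages, growing the current segment while
	 * pages stay machine-contiguous and starting a new one otherwise,
	 * checking the boundary and alignment constraints as we go.
	 */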
	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = _BUS_VM_PAGE_TO_BUS(m);
		if (curaddr < low || curaddr >= high)
			goto badaddr;
		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
			if ((lastaddr & boundary) != (curaddr & boundary))
				goto dorealloc;
		} else {
			curseg++;
			if (curseg >= nsegs || (curaddr & (alignment - 1)) != 0)
				goto dorealloc;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;
	return (0);

badaddr:
	if (doingrealloc == 0)
		goto dorealloc;
	if (curaddr < low) {
		/* no way to enforce this */
		printf("_xen_bus_dmamem_alloc_range: no way to "
		    "enforce address range (0x%" PRIx64 " - 0x%" PRIx64 ")\n",
		    (uint64_t)low, (uint64_t)high);
		uvm_pglistfree(&mlist);
		return EINVAL;
	}
	printf("xen_bus_dmamem_alloc_range: "
	    "curaddr=0x%lx > high=0x%lx\n",
	    (u_long)curaddr, (u_long)high);
	panic("xen_bus_dmamem_alloc_range 1");
dorealloc:
	if (doingrealloc == 1)
		panic("_xen_bus_dmamem_alloc_range: "
		   "_xen_alloc_contig returned "
		   "too many segments");
	doingrealloc = 1;
	/*
	 * Too many segments, or the memory doesn't fit the
	 * constraints. Free this memory and get a contiguous
	 * segment from the hypervisor.
	 */
	uvm_pglistfree(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg].ds_addr = 0;
		segs[curseg].ds_len = 0;
	}
	error = _xen_alloc_contig(size, alignment,
	    boundary, &mlist, flags, low, high);
	if (error)
		return error;
	goto again;
}
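
/*
 * Usage sketch (illustrative only, not part of this file): drivers
 * normally reach _xen_bus_dmamem_alloc_range() through
 * bus_dmamem_alloc(9) on a Xen bus_dma tag; "sc->sc_dmat" below is a
 * hypothetical softc member holding such a tag.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
 *		return ENOMEM;
 *
 * If the pages UVM returns are not machine-contiguous or violate the
 * tag's constraints, the code above trades them to the hypervisor for
 * a single contiguous extent.
 */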