/* $NetBSD: xen_bus_dma.c,v 1.34 2024/05/14 19:00:44 andvar Exp $ */
/* NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.34 2024/05/14 19:00:44 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/bus.h>

#include <machine/bus_private.h>
#include <machine/pmap_private.h>

#include <uvm/uvm.h>

#include "opt_xen.h"

/* No special needs */
struct x86_bus_dma_tag xenbus_bus_dma_tag = {
	._tag_needs_free = 0,
	._bounce_thresh = 0,
	._bounce_alloc_lo = 0,
	._bounce_alloc_hi = 0,
	._may_bounce = NULL,
};

#ifdef XENPV

extern paddr_t avail_end;

/*
 * Pure 2^n version of get_order: returns the smallest order such that
 * (1 << order) pages cover `size' bytes.
 */
static inline int get_order(unsigned long size)
{
	int order = -1;
	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

/*
 * Turn the (physically arbitrary) pages in *mlistp into a single
 * machine-contiguous extent mapped at the same physical addresses:
 * allocate 2^order pages from UVM, hand each backing machine page to
 * the hypervisor (XENMEM_decrease_reservation), ask it for one
 * contiguous extent of order `order' (XENMEM_increase_reservation),
 * then rewire the P2M/M2P translations so the old PAs point into the
 * new extent.  Pages beyond the requested size are given back to UVM.
 *
 * Returns 0 on success or ENOMEM; on hypercall failure the `failed'
 * path tries to re-populate the already-surrendered pages.
 *
 * NOTE(review): `low' is accepted but never enforced here — only
 * `high' is, via XENMEMF_address_bits; confirm callers don't rely
 * on a non-zero lower bound.
 */
static int
_xen_alloc_contig(bus_size_t size, bus_size_t alignment,
    struct pglist *mlistp, int flags, bus_addr_t low, bus_addr_t high)
{
	int order, i;
	unsigned long npagesreq, npages, mfn;
	bus_addr_t pa;
	struct vm_page *pg, *pgnext;
	int s, error;
	struct xen_memory_reservation res;

	/*
	 * When requesting a contiguous memory region, the hypervisor will
	 * return a memory range aligned on size.
	 * The only way to enforce alignment is to request a memory region
	 * of size max(alignment, size).
	 */
	order = uimax(get_order(size), get_order(alignment));
	npages = (1 << order);
	npagesreq = (size >> PAGE_SHIFT);
	KASSERT(npages >= npagesreq);

	/* get npages from UVM, and give them back to the hypervisor */
	error = uvm_pglistalloc(((psize_t)npages) << PAGE_SHIFT,
	    0, avail_end, 0, 0, mlistp, npages, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/* surrender each page's machine frame to Xen, one extent at a time */
	for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.queue.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
		mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
		/* invalidate the P2M entry before Xen takes the frame back */
		xpmap_ptom_unmap(pa);
		set_xen_guest_handle(res.extent_start, &mfn);
		res.nr_extents = 1;
		res.extent_order = 0;
		res.mem_flags = 0;
		res.domid = DOMID_SELF;
		error = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &res);
		if (error != 1) {
#ifdef DEBUG
			printf("xen_alloc_contig: XENMEM_decrease_reservation "
			    "failed: err %d (pa %#" PRIxPADDR " mfn %#lx)\n",
			    error, pa, mfn);
#endif
			/* restore the P2M entry we just tore down */
			xpmap_ptom_map(pa, ptoa(mfn));

			error = ENOMEM;
			goto failed;
		}
	}
	/* Get the new contiguous memory extent */
	set_xen_guest_handle(res.extent_start, &mfn);
	res.nr_extents = 1;
	res.extent_order = order;
	res.mem_flags = XENMEMF_address_bits(get_order(high) + PAGE_SHIFT);
	res.domid = DOMID_SELF;
	error = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res);
	if (error != 1) {
#ifdef DEBUG
		printf("xen_alloc_contig: XENMEM_increase_reservation "
		    "failed: %d (order %d mem_flags %d)\n",
		    error, order, res.mem_flags);
#endif
		error = ENOMEM;
		/* pg == NULL tells the recovery path no pages remain mapped */
		pg = NULL;
		goto failed;
	}
	s = splvm(); /* XXXSMP */
	/* Map the new extent in place of the old pages */
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.queue.tqe_next;
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_ptom_map(pa, ptoa(mfn+i));
		xpq_queue_machphys_update(((paddr_t)(mfn+i)) << PAGE_SHIFT, pa);
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	splx(s);
	/* now that ptom/mtop are valid, give the extra pages back to UVM */
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.queue.tqe_next;
		if (i >= npagesreq) {
			TAILQ_REMOVE(mlistp, pg, pageq.queue);
			uvm_pagefree(pg);
		}
	}
	return 0;

failed:
	/*
	 * Attempt to recover from a failed decrease or increase reservation:
	 * if decrease_reservation failed, we have not given all pages
	 * back to Xen; give them back to UVM, and get the missing pages
	 * from Xen.
	 * if increase_reservation failed, we expect pg to be NULL and we just
	 * get back the missing pages from Xen one by one.
	 */
	/* give back remaining pages to UVM */
	for (; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* replace the pages that we already gave to Xen */
	s = splvm(); /* XXXSMP */
	for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		set_xen_guest_handle(res.extent_start, &mfn);
		res.nr_extents = 1;
		res.extent_order = 0;
		res.mem_flags = XENMEMF_address_bits(32);
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res)
		    < 0) {
			printf("xen_alloc_contig: recovery "
			    "XENMEM_increase_reservation failed!\n");
			break;
		}
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_ptom_map(pa, ptoa(mfn));
		xpq_queue_machphys_update(((paddr_t)mfn) << PAGE_SHIFT, pa);
		/* slow but we don't care */
		xpq_queue_tlb_flush();
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	splx(s);
	return error;
}


/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 * We need our own version to deal with physical vs machine addresses.
 *
 * Tries a plain uvm_pglistalloc() first and checks the resulting
 * machine (bus) addresses against low/high, alignment, boundary and
 * nsegs; if they don't fit, frees the pages and retries once with a
 * hypervisor-contiguous extent from _xen_alloc_contig().
 */
int
_xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
	bus_addr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
	int doingrealloc = 0;	/* set once we've fallen back to Xen */
	bus_size_t uboundary;

	/* Always round the size. */
	size = round_page(size);

	/* alignment and boundary must be powers of 2 */
	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	KASSERT(boundary >= PAGE_SIZE || boundary == 0);

	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 * We accept boundaries < size, splitting in multiple segments
	 * if needed. uvm_pglistalloc does not, so compute an appropriate
	 * boundary: next power of 2 >= size
	 */
	if (boundary == 0)
		uboundary = 0;
	else {
		uboundary = boundary;
		while (uboundary < size)
			uboundary = uboundary << 1;
	}
	error = uvm_pglistalloc(size, 0, avail_end, alignment, uboundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);
again:

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	curaddr = lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
	if (curaddr < low || curaddr >= high)
		goto badaddr;
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.queue.tqe_next;
	if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
		goto dorealloc;

	/* coalesce machine-contiguous pages that don't cross `boundary' */
	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = _BUS_VM_PAGE_TO_BUS(m);
		if (curaddr < low || curaddr >= high)
			goto badaddr;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			if (curseg >= nsegs ||
			    (curaddr & (alignment - 1)) != 0) {
				if (doingrealloc)
					return EFBIG;
				else
					goto dorealloc;
			}
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;
	return (0);

badaddr:
	if (doingrealloc == 0)
		goto dorealloc;
	if (curaddr < low) {
		/*
		 * no way to enforce this: _xen_alloc_contig only honours
		 * the upper bound, so a non-zero `low' cannot be satisfied
		 */
		printf("_xen_bus_dmamem_alloc_range: no way to "
		    "enforce address range (0x%" PRIx64 " - 0x%" PRIx64 ")\n",
		    (uint64_t)low, (uint64_t)high);
		uvm_pglistfree(&mlist);
		return EINVAL;
	}
	/* above `high' even after realloc: the upper bound was requested */
	printf("xen_bus_dmamem_alloc_range: "
	    "curraddr=0x%lx > high=0x%lx\n",
	    (u_long)curaddr, (u_long)high);
	panic("xen_bus_dmamem_alloc_range 1");
dorealloc:
	if (doingrealloc == 1)
		panic("_xen_bus_dmamem_alloc_range: "
		    "xen_alloc_contig returned "
		    "too much segments");
	doingrealloc = 1;
	/*
	 * Too many segments, or memory doesn't fit
	 * constraints. Free this memory and
	 * get a contiguous segment from the hypervisor.
	 */
	uvm_pglistfree(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg].ds_addr = 0;
		segs[curseg].ds_len = 0;
	}
	error = _xen_alloc_contig(size, alignment,
	    &mlist, flags, low, high);
	if (error)
		return error;
	goto again;
}
#endif /* XENPV */