/*	$NetBSD: i915_mm.c,v 1.2 2021/12/18 23:45:28 riastradh Exp $	*/

/*
 * Copyright 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_mm.c,v 1.2 2021/12/18 23:45:28 riastradh Exp $");

#include <linux/mm.h>
#include <linux/io-mapping.h>

#include <asm/pgtable.h>

#include "i915_drv.h"

struct remap_pfn {
	struct mm_struct *mm;
	unsigned long pfn;
	pgprot_t prot;

	struct sgt_iter sgt;
	resource_size_t iobase;
};

static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* Special PTEs are not associated with any struct page. */
	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
	r->pfn++;

	return 0;
}

#define use_dma(io) ((io) != -1)

static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
	if (use_dma(r->iobase))
		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
	else
		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}

static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	if (GEM_WARN_ON(!r->sgt.pfn))
		return -EINVAL;

	/* Special PTEs are not associated with any struct page. */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}

/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: page frame number of the first page of the source memory
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
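	/*
	 * Take the caching attribute bits (_PAGE_CACHE_MASK) for the new
	 * PTEs from the io_mapping, e.g. write-combining for the aperture,
	 * and the remaining protection bits from the vma itself.
	 */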
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		/* Unwind any PTEs we managed to insert before failing. */
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}

/**
 * remap_io_sg - remap an IO-mapped scatterlist to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: start sg entry
 * @iobase: offset added to each segment's stored DMA address, or -1 to
 *          map by page frame number instead
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* Flush any stale cachelines over [addr, addr + size) first. */
	if (!use_dma(iobase))
		flush_cache_range(vma, addr, addr + size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		/* remap_sg() counted its insertions in r.pfn; unwind them. */
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
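
/*
 * Usage sketch (illustrative only, not part of this file): the typical
 * caller of remap_io_mapping() is a GTT fault handler, which hands it
 * the faulting user vma, the user address of the mapped view, the
 * aperture pfn at which the object starts, and the size of the view.
 * The names "area", "ggtt" and "vma" below are assumptions for this
 * sketch, not identifiers defined here.
 *
 *	err = remap_io_mapping(area,
 *	    area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
 *	    (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
 *	    min_t(u64, vma->size, area->vm_end - area->vm_start),
 *	    &ggtt->iomap);
 */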