/* $NetBSD: drm_gem_cma_helper.c,v 1.1 2017/12/26 14:53:12 jmcneill Exp $ */

/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_gem_cma_helper.c,v 1.1 2017/12/26 14:53:12 jmcneill Exp $");

#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

#include <uvm/uvm.h>

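/*
 * drm_gem_cma_create(ddev, size)
 *
 *	Allocate a GEM object of the given size backed by a single
 *	physically contiguous DMA segment.  The memory is mapped into
 *	kernel virtual address space with a coherent mapping, loaded
 *	into a DMA map, and zero-filled before the GEM object itself
 *	is initialized.  Returns NULL on failure.
 */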
struct drm_gem_cma_object *
drm_gem_cma_create(struct drm_device *ddev, unsigned int size)
{
	struct drm_gem_cma_object *obj;
	int error, nsegs;

	obj = kmem_zalloc(sizeof(*obj), KM_SLEEP);
	obj->dmat = ddev->bus_dmat;
	obj->dmasize = size;

	error = bus_dmamem_alloc(obj->dmat, obj->dmasize, PAGE_SIZE, 0,
	    obj->dmasegs, 1, &nsegs, BUS_DMA_WAITOK);
	if (error)
		goto failed;
	error = bus_dmamem_map(obj->dmat, obj->dmasegs, nsegs,
	    obj->dmasize, &obj->vaddr, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
	if (error)
		goto free;
	error = bus_dmamap_create(obj->dmat, obj->dmasize, 1,
	    obj->dmasize, 0, BUS_DMA_WAITOK, &obj->dmamap);
	if (error)
		goto unmap;
	error = bus_dmamap_load(obj->dmat, obj->dmamap, obj->vaddr,
	    obj->dmasize, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;

	memset(obj->vaddr, 0, obj->dmasize);

	drm_gem_private_object_init(ddev, &obj->base, size);

	return obj;

destroy:
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
unmap:
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
free:
	bus_dmamem_free(obj->dmat, obj->dmasegs, nsegs);
failed:
	kmem_free(obj, sizeof(*obj));

	return NULL;
}

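/*
 * drm_gem_cma_obj_free(obj)
 *
 *	Release the DMA resources backing the object, in the reverse
 *	order of their creation, and free the object itself.
 */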
static void
drm_gem_cma_obj_free(struct drm_gem_cma_object *obj)
{
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
	bus_dmamem_free(obj->dmat, obj->dmasegs, 1);
	kmem_free(obj, sizeof(*obj));
}

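/*
 * drm_gem_cma_free_object(gem_obj)
 *
 *	GEM free callback, invoked when the object's last reference is
 *	dropped: tear down the mmap offset and generic GEM state, then
 *	release the backing DMA memory.
 */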
void
drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	drm_gem_free_mmap_offset(gem_obj);
	drm_gem_object_release(gem_obj);
	drm_gem_cma_obj_free(obj);
}

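/*
 * drm_gem_cma_dumb_create(file_priv, ddev, args)
 *
 *	DRM_IOCTL_MODE_CREATE_DUMB implementation: compute the pitch
 *	and page-aligned size for the requested dimensions, allocate a
 *	CMA object, and return a handle to it in args->handle.
 */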
int
drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *ddev,
    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *obj;
	uint32_t handle;
	int error;

	/* Pitch is the width in bytes, rounding bpp up to whole bytes. */
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup(args->size, PAGE_SIZE);
	args->handle = 0;

	obj = drm_gem_cma_create(ddev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	error = drm_gem_handle_create(file_priv, &obj->base, &handle);
	/*
	 * Drop the reference taken at creation; on success the handle
	 * now holds a reference.  On failure this drops the refcount to
	 * zero, which releases the object through the driver's GEM free
	 * callback, so it must not be freed again here.
	 */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (error)
		return error;

	args->handle = handle;

	return 0;
}

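/*
 * drm_gem_cma_dumb_map_offset(file_priv, ddev, handle, offset)
 *
 *	DRM_IOCTL_MODE_MAP_DUMB implementation: look up the object by
 *	handle, create its fake mmap offset if it does not already have
 *	one, and return that offset for use with mmap(2).
 */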
int
drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, struct drm_device *ddev,
    uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem_obj;
	struct drm_gem_cma_object *obj;
	int error;

	gem_obj = drm_gem_object_lookup(ddev, file_priv, handle);
	if (gem_obj == NULL)
		return -ENOENT;

	obj = to_drm_gem_cma_obj(gem_obj);

	if (drm_vma_node_has_offset(&obj->base.vma_node) == 0) {
		error = drm_gem_create_mmap_offset(&obj->base);
		if (error)
			goto done;
	} else {
		error = 0;
	}

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

done:
	drm_gem_object_unreference_unlocked(&obj->base);

	return error;
}

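/*
 * drm_gem_cma_fault(ufi, vaddr, pps, npages, centeridx, access_type, flags)
 *
 *	UVM pager fault handler.  Because the buffer is physically
 *	contiguous DMA memory, each faulting page is resolved with
 *	bus_dmamem_mmap(9) and entered into the pmap directly, rather
 *	than handing vm_page structures back to UVM.
 */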
static int
drm_gem_cma_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *gem_obj =
	    container_of(uobj, struct drm_gem_object, gemo_uvmobj);
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);
	off_t curr_offset;
	vaddr_t curr_va;
	paddr_t paddr, mdpgno;
	u_int mmapflags;
	int lcv, retval;
	vm_prot_t mapprot;

	/* Copy-on-write mappings of a DMA buffer make no sense; reject them. */
	if (UVM_ET_ISCOPYONWRITE(entry))
		return -EIO;

	curr_offset = entry->offset + (vaddr - entry->start);
	curr_va = vaddr;

	retval = 0;
	for (lcv = 0; lcv < npages; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;
		if (pps[lcv] == PGO_DONTCARE)
			continue;

		/* Translate the object offset into a machine-dependent cookie. */
		mdpgno = bus_dmamem_mmap(obj->dmat, obj->dmasegs, 1,
		    curr_offset, access_type, BUS_DMA_PREFETCHABLE);
		if (mdpgno == -1) {
			retval = -EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mmapflags = pmap_mmap_flags(mdpgno);
		mapprot = ufi->entry->protection;

		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
			/*
			 * The pmap ran out of resources; unlock, wait
			 * for free pages, and let UVM restart the fault.
			 */
			pmap_update(ufi->orig_map->pmap);
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
			uvm_wait("drm_gem_cma_fault");
			return -ERESTART;
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);

	return retval;
}

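/*
 * drm_gem_cma_uvm_ops: UVM pager operations for CMA GEM objects.
 *	Reference counting is delegated to the generic GEM pager hooks;
 *	faults are handled by drm_gem_cma_fault() above.
 */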
const struct uvm_pagerops drm_gem_cma_uvm_ops = {
	.pgo_reference = drm_gem_pager_reference,
	.pgo_detach = drm_gem_pager_detach,
	.pgo_fault = drm_gem_cma_fault,
};
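
/*
 * Example usage (a hedged sketch, not part of this file): a driver
 * built on these helpers would typically wire them into its struct
 * drm_driver roughly as below.  The driver name is hypothetical, the
 * omitted fields are elided, and the gem_uvm_ops member assumes the
 * NetBSD drm2 version of struct drm_driver:
 *
 *	static struct drm_driver example_cma_driver = {
 *		...
 *		.dumb_create = drm_gem_cma_dumb_create,
 *		.dumb_map_offset = drm_gem_cma_dumb_map_offset,
 *		.gem_free_object = drm_gem_cma_free_object,
 *		.gem_uvm_ops = &drm_gem_cma_uvm_ops,
 *		...
 *	};
 */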