/*	$NetBSD: drm_vma_manager.c,v 1.3 2015/06/19 22:51:57 chs Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_vma_manager.c,v 1.3 2015/06/19 22:51:57 chs Exp $");
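
/*
 * NetBSD port of the Linux DRM VMA offset manager.  A
 * drm_vma_offset_manager hands out ranges of fake mmap page offsets
 * for GEM-style buffer objects; a drm_vma_offset_node records the
 * range assigned to one object together with the set of open files
 * allowed to map it.
 *
 * Illustrative call sequence (hypothetical driver code, not part of
 * this file):
 *
 *	drm_vma_node_init(&obj->vma_node);
 *	error = drm_vma_offset_add(mgr, &obj->vma_node, npages);
 *	...
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, startpage, npages);
 *	drm_vma_offset_unlock_lookup(mgr);
 *	...
 *	drm_vma_offset_remove(mgr, &obj->vma_node);
 *	drm_vma_node_destroy(&obj->vma_node);
 */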

#include <sys/kmem.h>
#include <sys/rbtree.h>
#include <sys/vmem.h>

#include <drm/drm_vma_manager.h>

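/*
 * Comparison callbacks for the manager's red-black tree of offset
 * nodes, ordered by starting page.  Lookup keys are pointers to
 * vmem_addr_t page numbers.
 */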
static int
drm_vma_node_compare(void *cookie __unused, const void *va, const void *vb)
{
        const struct drm_vma_offset_node *const na = va;
        const struct drm_vma_offset_node *const nb = vb;

        if (na->von_startpage < nb->von_startpage)
                return -1;
        if (na->von_startpage > nb->von_startpage)
                return +1;
        return 0;
}

static int
drm_vma_node_compare_key(void *cookie __unused, const void *vn, const void *vk)
{
        const struct drm_vma_offset_node *const n = vn;
        const vmem_addr_t *const k = vk;

        if (n->von_startpage < *k)
                return -1;
        if (n->von_startpage > *k)
                return +1;
        return 0;
}

static const rb_tree_ops_t drm_vma_node_rb_ops = {
        .rbto_compare_nodes = &drm_vma_node_compare,
        .rbto_compare_key = &drm_vma_node_compare_key,
        .rbto_node_offset = offsetof(struct drm_vma_offset_node, von_rb_node),
        .rbto_context = NULL,
};

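/*
 * Comparison callbacks for a node's red-black tree of permitted files,
 * ordered by the struct file pointer itself.  Lookup keys are
 * struct file pointers.
 */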
static int
drm_vma_file_compare(void *cookie __unused, const void *va, const void *vb)
{
        const struct drm_vma_offset_file *const fa = va;
        const struct drm_vma_offset_file *const fb = vb;

        if (fa->vof_file < fb->vof_file)
                return -1;
        if (fa->vof_file > fb->vof_file)
                return +1;
        return 0;
}

static int
drm_vma_file_compare_key(void *cookie __unused, const void *vf, const void *vk)
{
        const struct drm_vma_offset_file *const f = vf;
        const struct file *const k = vk;

        if (f->vof_file < k)
                return -1;
        if (f->vof_file > k)
                return +1;
        return 0;
}

static const rb_tree_ops_t drm_vma_file_rb_ops = {
        .rbto_compare_nodes = &drm_vma_file_compare,
        .rbto_compare_key = &drm_vma_file_compare_key,
        .rbto_node_offset = offsetof(struct drm_vma_offset_file, vof_rb_node),
        .rbto_context = NULL,
};

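/*
 * drm_vma_offset_manager_init(mgr, startpage, npages)
 *
 *	Initialize mgr to hand out page offsets in the range
 *	[startpage, startpage + npages), backed by a vmem arena with
 *	single-page granularity.
 */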
void
drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
    unsigned long startpage, unsigned long npages)
{

        rw_init(&mgr->vom_lock);
        rb_tree_init(&mgr->vom_nodes, &drm_vma_node_rb_ops);
        mgr->vom_vmem = vmem_create("drm_vma", startpage, npages, 1,
            NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

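/*
 * drm_vma_offset_manager_destroy(mgr)
 *
 *	Release the resources of mgr.  Nodes previously added should
 *	already have been removed.
 */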
void
drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{

        vmem_destroy(mgr->vom_vmem);
#if 0
        rb_tree_destroy(&mgr->vom_nodes);
#endif
        rw_destroy(&mgr->vom_lock);
}

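/*
 * drm_vma_node_init(node)
 *
 *	Initialize node for use with a drm_vma_offset_manager.  The
 *	node starts with no offset assigned and no files allowed.
 */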
void
drm_vma_node_init(struct drm_vma_offset_node *node)
{
        static const struct drm_vma_offset_node zero_node;

        *node = zero_node;

        rw_init(&node->von_lock);
        node->von_startpage = 0;
        node->von_npages = 0;
        rb_tree_init(&node->von_files, &drm_vma_file_rb_ops);
}

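/*
 * drm_vma_node_destroy(node)
 *
 *	Release the resources of node.  The node must no longer have an
 *	offset assigned (see the assertions below).
 */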
void
drm_vma_node_destroy(struct drm_vma_offset_node *node)
{

#if 0
        rb_tree_destroy(&node->von_files);
#endif
        KASSERT(node->von_startpage == 0);
        KASSERT(node->von_npages == 0);
        rw_destroy(&node->von_lock);
}

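/*
 * drm_vma_offset_add(mgr, node, npages)
 *
 *	Allocate a range of npages page offsets from mgr, assign it to
 *	node, and publish node for lookup.  A node that already has an
 *	offset is left unchanged.  Returns 0 on success, or a negative
 *	(Linux-style) error number such as -ENOSPC if the offset space
 *	is exhausted.
 */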
int
drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
    struct drm_vma_offset_node *node, unsigned long npages)
{
        vmem_size_t startpage;
        struct drm_vma_offset_node *collision __diagused;
        int error;

        KASSERT(npages != 0);

        if (0 < node->von_npages)
                return 0;

        error = vmem_alloc(mgr->vom_vmem, npages, VM_NOSLEEP|VM_BESTFIT,
            &startpage);
        if (error) {
                if (error == ENOMEM)
                        error = ENOSPC;
                /* XXX errno NetBSD->Linux */
                return -error;
        }

        node->von_startpage = startpage;
        node->von_npages = npages;

        rw_enter(&mgr->vom_lock, RW_WRITER);
        collision = rb_tree_insert_node(&mgr->vom_nodes, node);
        KASSERT(collision == node);
        rw_exit(&mgr->vom_lock);

        return 0;
}

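/*
 * drm_vma_offset_remove(mgr, node)
 *
 *	Unpublish node and return its page offset range to mgr.  A node
 *	with no offset assigned is left alone.
 */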
void
drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
    struct drm_vma_offset_node *node)
{

        if (node->von_npages == 0)
                return;

        rw_enter(&mgr->vom_lock, RW_WRITER);
        rb_tree_remove_node(&mgr->vom_nodes, node);
        rw_exit(&mgr->vom_lock);

        vmem_free(mgr->vom_vmem, node->von_startpage, node->von_npages);

        node->von_npages = 0;
        node->von_startpage = 0;
}

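/*
 * drm_vma_offset_lock_lookup(mgr), drm_vma_offset_unlock_lookup(mgr)
 *
 *	Bracket one or more calls to drm_vma_offset_lookup_locked by
 *	taking and releasing the manager's lock as a reader.
 */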
void
drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{

        rw_enter(&mgr->vom_lock, RW_READER);
}

void
drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
{

        rw_exit(&mgr->vom_lock);
}

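/*
 * drm_vma_offset_lookup_locked(mgr, startpage, npages)
 *
 *	Find the node whose page offset range contains
 *	[startpage, startpage + npages), or return NULL if there is
 *	none.  The caller must hold the manager's lookup lock.
 */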
struct drm_vma_offset_node *
drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
    unsigned long startpage, unsigned long npages)
{
        const vmem_addr_t key = startpage;
        struct drm_vma_offset_node *node;

        KASSERT(rw_lock_held(&mgr->vom_lock));

        node = rb_tree_find_node_leq(&mgr->vom_nodes, &key);
        if (node == NULL)
                return NULL;
        KASSERT(node->von_startpage <= startpage);
        if (node->von_npages < npages)
                return NULL;
        if (node->von_npages - npages < startpage - node->von_startpage)
                return NULL;

        return node;
}

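/*
 * drm_vma_offset_exact_lookup(mgr, startpage, npages)
 *
 *	Find the node whose page offset range is exactly
 *	[startpage, startpage + npages), or return NULL if there is
 *	none.  Takes the lookup lock internally.
 */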
struct drm_vma_offset_node *
drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
    unsigned long startpage, unsigned long npages)
{
        const vmem_addr_t key = startpage;
        struct drm_vma_offset_node *node;

        rw_enter(&mgr->vom_lock, RW_READER);

        node = rb_tree_find_node(&mgr->vom_nodes, &key);
        if (node == NULL)
                goto out;
        KASSERT(node->von_startpage == startpage);
        if (node->von_npages != npages) {
                node = NULL;
                goto out;
        }

out:    rw_exit(&mgr->vom_lock);
        return node;
}

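/*
 * drm_vma_node_allow(node, file)
 *
 *	Grant file permission to map node.  Returns 0 on success,
 *	including when file was already allowed, or -ENOMEM if there is
 *	no memory to record the permission.
 */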
int
drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *file)
{
        struct drm_vma_offset_file *new, *old;

        new = kmem_alloc(sizeof(*new), KM_NOSLEEP);
        if (new == NULL)
                return -ENOMEM;
        new->vof_file = file;

        rw_enter(&node->von_lock, RW_WRITER);
        old = rb_tree_insert_node(&node->von_files, new);
        rw_exit(&node->von_lock);

        if (old != new)         /* collision */
                kmem_free(new, sizeof(*new));

        return 0;
}

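/*
 * drm_vma_node_revoke(node, file)
 *
 *	Revoke any permission file had to map node.  Revoking a file
 *	that was never allowed is a no-op.
 */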
void
drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *file)
{

        rw_enter(&node->von_lock, RW_WRITER);
        struct drm_vma_offset_file *const found =
            rb_tree_find_node(&node->von_files, file);
        if (found != NULL)
                rb_tree_remove_node(&node->von_files, found);
        rw_exit(&node->von_lock);
        if (found != NULL)
                kmem_free(found, sizeof(*found));
}

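/*
 * drm_vma_node_is_allowed(node, file)
 *
 *	True if file currently has permission to map node.
 */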
bool
drm_vma_node_is_allowed(struct drm_vma_offset_node *node, struct file *file)
{

        rw_enter(&node->von_lock, RW_READER);
        const bool allowed =
            (rb_tree_find_node(&node->von_files, file) != NULL);
        rw_exit(&node->von_lock);

        return allowed;
}

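/*
 * drm_vma_node_verify_access(node, file)
 *
 *	Return 0 if file has permission to map node, -EACCES if not.
 */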
int
drm_vma_node_verify_access(struct drm_vma_offset_node *node, struct file *file)
{

        if (!drm_vma_node_is_allowed(node, file))
                return -EACCES;

        return 0;
}