/*	$NetBSD: drm_vma_manager.c,v 1.1.4.2.4.1 2017/01/18 08:46:45 skrll Exp $	*/
2
3 /*-
4 * Copyright (c) 2014 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: drm_vma_manager.c,v 1.1.4.2.4.1 2017/01/18 08:46:45 skrll Exp $");
34
35 #include <sys/kmem.h>
36 #include <sys/rbtree.h>
37 #include <sys/vmem.h>
38
39 #include <drm/drm_vma_manager.h>
40
41 static int
42 drm_vma_node_compare(void *cookie __unused, const void *va, const void *vb)
43 {
44 const struct drm_vma_offset_node *const na = va;
45 const struct drm_vma_offset_node *const nb = vb;
46
47 if (na->von_startpage < nb->von_startpage)
48 return -1;
49 if (na->von_startpage > nb->von_startpage)
50 return +1;
51 return 0;
52 }
53
54 static int
55 drm_vma_node_compare_key(void *cookie __unused, const void *vn, const void *vk)
56 {
57 const struct drm_vma_offset_node *const n = vn;
58 const vmem_addr_t *const k = vk;
59
60 if (n->von_startpage < *k)
61 return -1;
62 if (n->von_startpage > *k)
63 return +1;
64 return 0;
65 }
66
/*
 * rbtree ops for the per-manager tree of offset nodes, keyed by each
 * node's starting page offset (von_startpage).
 */
static const rb_tree_ops_t drm_vma_node_rb_ops = {
	.rbto_compare_nodes = &drm_vma_node_compare,
	.rbto_compare_key = &drm_vma_node_compare_key,
	.rbto_node_offset = offsetof(struct drm_vma_offset_node, von_rb_node),
	.rbto_context = NULL,
};
73
74 static int
75 drm_vma_file_compare(void *cookie __unused, const void *va, const void *vb)
76 {
77 const struct drm_vma_offset_file *const fa = va;
78 const struct drm_vma_offset_file *const fb = vb;
79
80 if (fa->vof_file < fb->vof_file)
81 return -1;
82 if (fa->vof_file > fb->vof_file)
83 return +1;
84 return 0;
85 }
86
87 static int
88 drm_vma_file_compare_key(void *cookie __unused, const void *vf, const void *vk)
89 {
90 const struct drm_vma_offset_file *const f = vf;
91 const struct file *const k = vk;
92
93 if (f->vof_file < k)
94 return -1;
95 if (f->vof_file > k)
96 return +1;
97 return 0;
98 }
99
/*
 * rbtree ops for the per-node tree of files allowed to map the node,
 * keyed by the struct file pointer (vof_file).
 */
static const rb_tree_ops_t drm_vma_file_rb_ops = {
	.rbto_compare_nodes = &drm_vma_file_compare,
	.rbto_compare_key = &drm_vma_file_compare_key,
	.rbto_node_offset = offsetof(struct drm_vma_offset_file, vof_rb_node),
	.rbto_context = NULL,
};
106
107 void
108 drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
109 unsigned long startpage, unsigned long npages)
110 {
111
112 rw_init(&mgr->vom_lock);
113 rb_tree_init(&mgr->vom_nodes, &drm_vma_node_rb_ops);
114 mgr->vom_vmem = vmem_create("drm_vma", startpage, npages, 1,
115 NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
116 }
117
118 void
119 drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
120 {
121
122 vmem_destroy(mgr->vom_vmem);
123 KASSERTMSG((RB_TREE_MIN(&mgr->vom_nodes) == NULL),
124 "drm vma offset manager %p not empty", mgr);
125 #if 0
126 rb_tree_destroy(&mgr->vom_nodes);
127 #endif
128 rw_destroy(&mgr->vom_lock);
129 }
130
131 void
133 drm_vma_node_init(struct drm_vma_offset_node *node)
134 {
135 static const struct drm_vma_offset_node zero_node;
136
137 *node = zero_node;
138
139 rw_init(&node->von_lock);
140 node->von_startpage = 0;
141 node->von_npages = 0;
142 rb_tree_init(&node->von_files, &drm_vma_file_rb_ops);
143 }
144
145 void
146 drm_vma_node_destroy(struct drm_vma_offset_node *node)
147 {
148
149 KASSERTMSG((RB_TREE_MIN(&node->von_files) == NULL),
150 "drm vma node %p not empty", node);
151 #if 0
152 rb_tree_destroy(&node->von_files);
153 #endif
154 KASSERT(node->von_startpage == 0);
155 KASSERT(node->von_npages == 0);
156 rw_destroy(&node->von_lock);
157 }
158
159 int
160 drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
161 struct drm_vma_offset_node *node, unsigned long npages)
162 {
163 vmem_size_t startpage;
164 struct drm_vma_offset_node *collision __diagused;
165 int error;
166
167 KASSERT(npages != 0);
168
169 if (0 < node->von_npages)
170 return 0;
171
172 error = vmem_alloc(mgr->vom_vmem, npages, VM_NOSLEEP|VM_BESTFIT,
173 &startpage);
174 if (error) {
175 if (error == ENOMEM)
176 error = ENOSPC;
177 /* XXX errno NetBSD->Linux */
178 return -error;
179 }
180
181 node->von_startpage = startpage;
182 node->von_npages = npages;
183
184 rw_enter(&mgr->vom_lock, RW_WRITER);
185 collision = rb_tree_insert_node(&mgr->vom_nodes, node);
186 KASSERT(collision == node);
187 rw_exit(&mgr->vom_lock);
188
189 return 0;
190 }
191
192 void
193 drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
194 struct drm_vma_offset_node *node)
195 {
196
197 if (node->von_npages == 0)
198 return;
199
200 rw_enter(&mgr->vom_lock, RW_WRITER);
201 rb_tree_remove_node(&mgr->vom_nodes, node);
202 rw_exit(&mgr->vom_lock);
203
204 vmem_free(mgr->vom_vmem, node->von_startpage, node->von_npages);
205
206 node->von_npages = 0;
207 node->von_startpage = 0;
208 }
209
/*
 * drm_vma_offset_lock_lookup: Take the manager's lock as a reader, for
 * use around drm_vma_offset_lookup_locked.  Pair with
 * drm_vma_offset_unlock_lookup.
 */
void
drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{

	rw_enter(&mgr->vom_lock, RW_READER);
}
217
/*
 * drm_vma_offset_unlock_lookup: Release the lock taken by
 * drm_vma_offset_lock_lookup.
 */
void
drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
{

	rw_exit(&mgr->vom_lock);
}
224
225 struct drm_vma_offset_node *
226 drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
227 unsigned long startpage, unsigned long npages)
228 {
229 const vmem_addr_t key = startpage;
230 struct drm_vma_offset_node *node;
231
232 KASSERT(rw_lock_held(&mgr->vom_lock));
233
234 node = rb_tree_find_node_leq(&mgr->vom_nodes, &key);
235 if (node == NULL)
236 return NULL;
237 KASSERT(node->von_startpage <= startpage);
238 if (npages < node->von_npages)
239 return NULL;
240 if (node->von_npages - npages < startpage - node->von_startpage)
241 return NULL;
242
243 return node;
244 }
245
246 struct drm_vma_offset_node *
247 drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
248 unsigned long startpage, unsigned long npages)
249 {
250 const vmem_addr_t key = startpage;
251 struct drm_vma_offset_node *node;
252
253 rw_enter(&mgr->vom_lock, RW_READER);
254
255 node = rb_tree_find_node(&mgr->vom_nodes, &key);
256 if (node == NULL)
257 goto out;
258 KASSERT(node->von_startpage == startpage);
259 if (node->von_npages != npages) {
260 node = NULL;
261 goto out;
262 }
263
264 out: rw_exit(&mgr->vom_lock);
265 return node;
266 }
267
268 int
270 drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *file)
271 {
272 struct drm_vma_offset_file *new, *old;
273
274 new = kmem_alloc(sizeof(*new), KM_NOSLEEP);
275 if (new == NULL)
276 return -ENOMEM;
277 new->vof_file = file;
278
279 rw_enter(&node->von_lock, RW_WRITER);
280 old = rb_tree_insert_node(&node->von_files, new);
281 rw_exit(&node->von_lock);
282
283 if (old != new) /* collision */
284 kmem_free(new, sizeof(*new));
285
286 return 0;
287 }
288
289 void
290 drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *file)
291 {
292
293 rw_enter(&node->von_lock, RW_WRITER);
294 struct drm_vma_offset_file *const found =
295 rb_tree_find_node(&node->von_files, file);
296 if (found != NULL)
297 rb_tree_remove_node(&node->von_files, found);
298 rw_exit(&node->von_lock);
299 if (found != NULL)
300 kmem_free(found, sizeof(*found));
301 }
302
303 bool
304 drm_vma_node_is_allowed(struct drm_vma_offset_node *node, struct file *file)
305 {
306
307 rw_enter(&node->von_lock, RW_READER);
308 const bool allowed =
309 (rb_tree_find_node(&node->von_files, file) != NULL);
310 rw_exit(&node->von_lock);
311
312 return allowed;
313 }
314
315 int
316 drm_vma_node_verify_access(struct drm_vma_offset_node *node, struct file *file)
317 {
318
319 if (!drm_vma_node_is_allowed(node, file))
320 return -EACCES;
321
322 return 0;
323 }
324