/*
 * Copyright 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
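
/*
 * Illustrative note (assuming PAGE_SHIFT == 12, i.e. 4 KiB pages, which is
 * not stated in the original file): on a 64-bit kernel the fake offsets
 * start at page 0x100000 -- byte offset 4 GiB, above any pgoff a 32-bit
 * sized real mapping could use -- and span 16 * 0xFFFFF pages, roughly
 * 64 GiB of mmap-offset space.  On a 32-bit kernel the same scheme is
 * scaled down so the largest fake page index still fits in an unsigned
 * long.
 */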

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
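
/*
 * Illustrative sketch (not part of the original file): a driver typically
 * embeds struct drm_gem_object in its own buffer-object type and calls
 * drm_gem_object_init() on the embedded member.  "struct example_bo" and
 * example_bo_create() are hypothetical names; the block is guarded by
 * #if 0 so it is never compiled.
 */
#if 0
struct example_bo {
	struct drm_gem_object base;	/* embedded GEM object */
	/* driver-private state would follow here */
};

static struct example_bo *
example_bo_create(struct drm_device *dev, size_t size)
{
	struct example_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (bo == NULL)
		return NULL;

	/* Size must be a multiple of PAGE_SIZE (see the BUG_ON above). */
	if (drm_gem_object_init(dev, &bo->base, roundup(size, PAGE_SIZE))) {
		kfree(bo);
		return NULL;
	}

	return bo;
}
#endif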

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);
	}
	if (obj->export_dma_buf) {
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->export_dma_buf);
	}
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
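
/*
 * Illustrative sketch (not part of the original file): the usual pattern in
 * a driver's "create buffer" ioctl -- allocate the object, publish it to
 * userspace with drm_gem_handle_create(), then drop the local reference so
 * the handle holds the only reference.  example_create_ioctl() and its
 * argument layout are hypothetical; guarded by #if 0 so it is never
 * compiled.
 */
#if 0
static int
example_create_ioctl(struct drm_device *dev, struct drm_file *file_priv,
		     size_t size, u32 *handlep)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_alloc(dev, roundup(size, PAGE_SIZE));
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, handlep);
	/*
	 * Drop the allocation reference; if handle creation succeeded the
	 * handle now keeps the object alive, otherwise this frees it.
	 */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
#endif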


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, false);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
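
/*
 * Illustrative sketch (not part of the original file): how a driver's
 * "map offset" ioctl typically hands the fake offset back to userspace,
 * which then passes it as the offset argument of mmap(2) on the DRM fd.
 * example_mmap_offset_ioctl() and its argument layout are hypothetical;
 * guarded by #if 0 so it is never compiled.
 */
#if 0
static int
example_mmap_offset_ioctl(struct drm_device *dev, struct drm_file *file_priv,
			  u32 handle, u64 *offsetp)
{
	struct drm_gem_object *obj;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (obj == NULL)
		return -ENOENT;

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	/* hash.key is a page index; userspace wants a byte offset. */
	*offsetp = (u64)obj->map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
#endif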

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;
		else if (ret)
			goto err;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
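
/*
 * Illustrative userspace-side sketch (not part of the original file) of how
 * two processes share a buffer through the flink/open ioctls above.  Error
 * handling is omitted and "fd" is an already-open DRM file descriptor.
 *
 *	// Exporting process: turn a local handle into a global name.
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// flink.name can now be passed to another process.
 *
 *	// Importing process: turn the global name into a local handle.
 *	struct drm_gem_open open_arg = { .name = name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);
 *	// open_arg.handle and open_arg.size are now valid.
 *
 *	// When done, drop the handle (and the reference it holds).
 *	struct drm_gem_close close_arg = { .handle = open_arg.handle };
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
 */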

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);


/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
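
/*
 * Illustrative sketch (not part of the original file): how a driver
 * typically wires the pieces above together -- drm_gem_mmap() as the
 * file_operations .mmap hook, and drm_gem_vm_open()/drm_gem_vm_close()
 * alongside its own fault handler in the gem_vm_ops it registers in
 * struct drm_driver.  example_gem_fault() is a hypothetical driver
 * function; guarded by #if 0 so it is never compiled.
 */
#if 0
static int example_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static const struct vm_operations_struct example_gem_vm_ops = {
	.fault = example_gem_fault,	/* driver-specific: bind/migrate, then map */
	.open = drm_gem_vm_open,	/* takes a reference per mapping */
	.close = drm_gem_vm_close,	/* drops that reference */
};

static const struct file_operations example_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,		/* routes fake offsets to gem_vm_ops */
};
#endif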