/*
 * Copyright 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <asm/bug.h>
#include <drm/drmP.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
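
/*
 * Worked example, assuming PAGE_SHIFT == 12 (4 KiB pages): on 64-bit,
 * fake offsets start at page 0x100000 (byte offset 4 GiB, above any
 * 32-bit file offset) and span roughly 16M pages, about 64 GiB of
 * offset space; on 32-bit they start at page 0x10000 (byte offset
 * 256 MiB) and span about 1M pages, so the largest fake pgoff still
 * fits in a 32-bit unsigned long.
 */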

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;

	idr_destroy(&dev->object_name_idr);
#ifdef __NetBSD__
	spin_lock_destroy(&dev->object_name_lock);
#endif
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __NetBSD__
	obj->gemo_shm_uao = uao_create(size, 0);
	KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
	KASSERT(dev->driver->gem_uvm_ops != NULL);
	uvm_obj_init(&obj->gemo_uvmobj, dev->driver->gem_uvm_ops, true, 1);
#else
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);
#endif

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
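
/*
 * Illustrative sketch of a driver wrapping this in its own buffer
 * type; the foo_* names are hypothetical, not part of this file.
 * Note that the size must be page-aligned (see the BUG_ON above):
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	int ret;
 *
 *	if (bo == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_object_init(dev, &bo->base, roundup(size, PAGE_SIZE));
 *	if (ret) {
 *		kfree(bo);
 *		return ret;
 *	}
 */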

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
				struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __NetBSD__
	obj->gemo_shm_uao = NULL;
	KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
	KASSERT(dev->driver->gem_uvm_ops != NULL);
	uvm_obj_init(&obj->gemo_uvmobj, dev->driver->gem_uvm_ops, true, 1);
#else
	obj->filp = NULL;
#endif

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	drm_gem_object_release(obj);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

#ifndef __NetBSD__	/* XXX drm prime */
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);
	}
	if (obj->export_dma_buf) {
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->export_dma_buf);
	}
}
#endif

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

#ifndef __NetBSD__	/* XXX drm prime */
	drm_gem_remove_prime_handles(obj, filp);
#endif

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
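
/*
 * Typical use from a driver's create ioctl (sketch): the handle takes
 * its own reference, so once it exists the caller drops the reference
 * it got from allocation, leaving the handle as the sole owner.
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *	drm_gem_object_unreference_unlocked(obj);
 *	if (ret)
 *		return ret;
 */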


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, false);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
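
/*
 * Sketch of how a driver's map ioctl typically hands the fake offset
 * back to userspace (the args layout is the driver's own; the field
 * names on obj are the ones used in this file). The offset is created
 * lazily, only on the first map request:
 *
 *	if (obj->map_list.map == NULL) {
 *		ret = drm_gem_create_mmap_offset(obj);
 *		if (ret)
 *			return ret;
 *	}
 *	args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
 */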

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
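
/*
 * Typical ioctl pattern (sketch): look up the handle, operate on the
 * object, then drop the reference the lookup took.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	... operate on obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */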

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;
		else if (ret)
			goto err;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
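
/*
 * Userspace sketch of sharing a buffer between two DRM file
 * descriptors with the two ioctls above (illustrative only; error
 * handling omitted):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *	... hand flink.name to the second process somehow ...
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);
 *	... op.handle and op.size now describe the shared object ...
 */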

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

#ifndef __NetBSD__	/* XXX drm prime */
	drm_gem_remove_prime_handles(obj, file_priv);
#endif

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
#ifdef __NetBSD__
	spin_lock_destroy(&file_private->table_lock);
#endif
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
#ifdef __NetBSD__
	if (obj->gemo_shm_uao)
		uao_detach(obj->gemo_shm_uao);
	uvm_obj_destroy(&obj->gemo_uvmobj, true);
#else
	if (obj->filp)
		fput(obj->filp);
#endif
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle
		 * holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);

#ifndef __NetBSD__

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);


/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
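
/*
 * Userspace side of this flow (sketch): a driver-specific map ioctl
 * returns the fake offset, which is then passed to mmap(2) on the
 * same DRM fd; faults on the resulting mapping are serviced by the
 * driver's gem_vm_ops.
 *
 *	... driver-specific ioctl fills in 'offset' ...
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    drm_fd, offset);
 */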

#endif	/* !defined(__NetBSD__) */