/*	$NetBSD: amdgpu_mn.c,v 1.1.1.1 2018/08/27 01:34:44 riastradh Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_mn.c,v 1.1.1.1 2018/08/27 01:34:44 riastradh Exp $");

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"

struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root		objects;
};

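/*
 * One interval tree node per (possibly merged) userptr address range;
 * bos lists every amdgpu_bo whose user pages fall within that range.
 */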
struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {

		interval_tree_remove(&node->it, &rmn->objects);
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
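	/*
	 * Teardown is deferred to a work item; presumably unregistering the
	 * notifier and freeing rmn directly from this callback would not be
	 * safe while the mm is in the middle of being torn down.
	 */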
	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;
		long r;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

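			/*
			 * Skip BOs whose user pages do not intersect the
			 * invalidated range; for the rest, wait for pending
			 * GPU work on the reservation object and then move
			 * the BO back into the CPU domain so its GPU
			 * mapping is torn down.
			 */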
			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
							  end))
				continue;

			r = amdgpu_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			amdgpu_bo_unreserve(bo);
		}
	}

	mutex_unlock(&rmn->lock);
}

static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *rmn;
	int r;

	down_write(&mm->mmap_sem);
	mutex_lock(&adev->mn_lock);

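	/*
	 * There is at most one amdgpu_mn per (device, mm) pair, kept in
	 * adev->mn_hash keyed by the mm pointer; reuse it if this process
	 * already registered one.
	 */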
	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;

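	/*
	 * The double-underscore variant is used because mmap_sem is already
	 * held for write here; plain mmu_notifier_register() would try to
	 * take it itself.
	 */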
	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	mutex_unlock(&adev->mn_lock);
	up_write(&mm->mmap_sem);

	return rmn;

free_rmn:
	mutex_unlock(&adev->mn_lock);
	up_write(&mm->mmap_sem);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);

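	/*
	 * Merge every existing node that overlaps [addr, end] into a single
	 * interval covering the union, collecting the BOs of the removed
	 * nodes on the local list so they can be re-attached below.
	 */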
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}

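/*
 * Illustrative sketch only, not part of this file: a userptr creation
 * path would typically pair the two entry points roughly like
 *
 *	r = amdgpu_mn_register(bo, userptr_addr);
 *	if (r)
 *		goto error_free;
 *	...
 *	amdgpu_mn_unregister(bo);	/+ on teardown or error +/
 *
 * where userptr_addr stands for whatever user virtual address backs the
 * BO in the caller.
 */
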
/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);
	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

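	/*
	 * The check below only succeeds when the saved "next" pointer is
	 * the now-empty list head node->bos, i.e. the BO we just removed
	 * was the last one on the node, so the node can be dropped from
	 * the interval tree and freed.
	 */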
	if (list_empty(head)) {
		struct amdgpu_mn_node *node;
		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
}