/*	$NetBSD: i915_gem_evict.c,v 1.6 2021/12/19 12:25:37 riastradh Exp $	*/

/*
 * Copyright 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_evict.c,v 1.6 2021/12/19 12:25:37 riastradh Exp $");

#include <drm/i915_drm.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_trace.h"

#include <linux/nbsd-namespace.h>

I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
	bool fail_if_busy:1;
} igt_evict_ctl;)

static int ggtt_flush(struct intel_gt *gt)
{
	/*
	 * Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
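/*
 * The eviction passes below drive drm_mm's scan API. A sketch of the
 * protocol (illustrative only, not code from this file): every block
 * added to a scan must be removed again before any other drm_mm
 * operation is legal, which is why the callers keep an unwind list.
 *
 *	drm_mm_scan_init_with_range(&scan, mm, size, align, color,
 *				    start, end, mode);
 *	while ((vma = next_candidate())) {
 *		list_add(&vma->evict_link, &unwind);
 *		if (drm_mm_scan_add_block(&scan, &vma->node))
 *			break;	// a suitable hole was found
 *	}
 *	list_for_each_entry(vma, &unwind, evict_link)
 *		drm_mm_scan_remove_block(&scan, &vma->node);
 *
 * next_candidate() is a hypothetical helper standing in for the list
 * walks below.
 */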
static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	list_add(&vma->evict_link, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @color: color for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it only
 * ignores pinned vmas, and not objects whose backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned long color,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	struct i915_vma *active;
	int ret;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in rough LRU order.
	 * Since both active and inactive objects reside on the same list,
	 * in a mix of creation and last scanned order, as we process the list
	 * we sort it into inactive/active, which keeps the active portion
	 * in a rough MRU order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired, random order)
	 *   2. Active objects (will stall on unbinding, oldest scanned first)
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, color,
				    start, end, mode);

	intel_gt_retire_requests(vm->gt);

search_again:
	active = NULL;
	INIT_LIST_HEAD(&eviction_list);
	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
		/*
		 * We keep this list in a rough least-recently scanned order
		 * of active elements (inactive elements are cheap to reap).
		 * New entries are added to the end, and we move anything we
		 * scan to the end. The assumption is that the working set
		 * of applications is either steady state (and thanks to the
		 * userspace bo cache it almost always is) or volatile and
		 * frequently replaced after a frame, which are self-evicting!
		 * Given that assumption, the MRU order of the scan list is
		 * fairly static, and keeping it in least-recently scanned
		 * order is suitable.
		 *
		 * To notice when we complete one full cycle, we record the
		 * first active element seen, before moving it to the tail.
		 */
		if (i915_vma_is_active(vma)) {
			if (vma == active) {
				if (flags & PIN_NONBLOCK)
					break;

				active = ERR_PTR(-EAGAIN);
			}

			if (active != ERR_PTR(-EAGAIN)) {
				if (!active)
					active = vma;

				list_move_tail(&vma->vm_link, &vm->bound_list);
				continue;
			}
		}

		if (mark_free(&scan, vma, flags, &eviction_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
	}

	/*
	 * Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	/*
	 * Not everything in the GGTT is tracked via VMA using
	 * i915_vma_move_to_active(), otherwise we could evict as required
	 * with minimal stalling. Instead we are forced to idle the GPU and
	 * explicitly retire outstanding requests, which will then remove
	 * the pinning for active objects such as contexts and rings,
	 * enabling us to evict them on the next iteration.
	 *
	 * To ensure that all user contexts are evictable, we perform
	 * a switch to the perma-pinned kernel context. This also gives
	 * us a termination condition: when the last retired context is
	 * the kernel's, there is nothing more we can evict.
	 */
	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
		return -EBUSY;

	ret = ggtt_flush(vm->gt);
	if (ret)
		return ret;

	cond_resched();

	flags |= PIN_NONBLOCK;
	goto search_again;

found:
	/*
	 * drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del(&vma->evict_link);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = __i915_vma_unbind(vma);
	}

	return ret;
}
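/*
 * Illustrative caller pattern for i915_gem_evict_something() (a sketch,
 * not code from this file): binding code first tries to allocate a node
 * and only evicts on -ENOSPC, since the scan is comparatively expensive.
 *
 *	err = drm_mm_insert_node_in_range(&vm->mm, node, size, align,
 *					  color, start, end, mode);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_something(vm, size, align, color,
 *					       start, end, flags);
 *		if (err == 0)
 *			err = drm_mm_insert_node_in_range(...);
 *	}
 */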
/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/*
	 * Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	intel_gt_retire_requests(vm->gt);

	if (i915_vm_has_cache_coloring(vm)) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page afterwards to avoid running off
		 * the end of the GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);
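
	/*
	 * Worked example of the expansion above (illustrative numbers,
	 * assuming I915_GTT_PAGE_SIZE == 4096): a target of
	 * [0x10000, 0x20000) is searched as [0xf000, 0x21000), so that
	 * the loop below also visits whatever abuts the target on either
	 * side and can enforce the guard-page colouring rules against it.
	 */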
	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = container_of(node, typeof(*vma), node);

		/*
		 * If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (i915_vm_has_cache_coloring(vm)) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_flags &&
			    *vma->exec_flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/*
		 * Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
	}

	return ret;
}
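/*
 * Illustrative caller pattern for i915_gem_evict_for_node() (a sketch,
 * not code from this file): reserving a fixed GTT address falls back to
 * evicting just the overlapping range rather than scanning for any
 * suitably sized hole.
 *
 *	node->start = offset;
 *	node->size = size;
 *	node->color = color;
 *	err = drm_mm_reserve_node(&vm->mm, node);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_for_node(vm, node, flags);
 *		if (err == 0)
 *			err = drm_mm_reserve_node(&vm->mm, node);
 *	}
 */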
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm)
{
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict_vm(vm);

	/*
	 * Switch back to the default context in order to unpin
	 * the existing context objects. However, such objects only
	 * pin themselves inside the global GTT and performing the
	 * switch otherwise is ineffective.
	 */
	if (i915_is_ggtt(vm)) {
		ret = ggtt_flush(vm->gt);
		if (ret)
			return ret;
	}

	do {
		struct i915_vma *vma, *vn;
		LIST_HEAD(eviction_list);

		list_for_each_entry(vma, &vm->bound_list, vm_link) {
			if (i915_vma_is_pinned(vma))
				continue;

			__i915_vma_pin(vma);
			list_add(&vma->evict_link, &eviction_list);
		}
		if (list_empty(&eviction_list))
			break;

		ret = 0;
		list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
			__i915_vma_unpin(vma);
			if (ret == 0)
				ret = __i915_vma_unbind(vma);
			if (ret != -EINTR) /* "Get me out of here!" */
				ret = 0;
		}
	} while (ret == 0);

	return ret;
}
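/*
 * Illustrative use of i915_gem_evict_vm() (a sketch, not code from this
 * file): per the kerneldoc above, execbuf uses this as a last-ditch
 * defragmentation pass, retrying placement after the full eviction.
 * The caller must already hold vm->mutex, as asserted above.
 *
 *	mutex_lock(&vm->mutex);
 *	err = i915_gem_evict_vm(vm);
 *	mutex_unlock(&vm->mutex);
 *	if (err == 0)
 *		... retry placing the execbuf objects ...
 */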
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif