/* $NetBSD: i915_gem_evict.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $ */

/*
 * Copyright 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_evict.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");

#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void quirk_add(struct drm_i915_gem_object *obj,
                      struct list_head *objects)
{
        /* quirk is only for live tiled objects, use it to declare ownership */
        GEM_BUG_ON(obj->mm.quirked);
        obj->mm.quirked = true;
        list_add(&obj->st_link, objects);
}

static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
        unsigned long unbound, bound, count;
        struct drm_i915_gem_object *obj;

        count = 0;
        do {
                struct i915_vma *vma;

                obj = i915_gem_object_create_internal(ggtt->vm.i915,
                                                      I915_GTT_PAGE_SIZE);
                if (IS_ERR(obj))
                        return PTR_ERR(obj);

                vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
                if (IS_ERR(vma)) {
                        i915_gem_object_put(obj);
                        if (vma == ERR_PTR(-ENOSPC))
                                break;

                        return PTR_ERR(vma);
                }

                quirk_add(obj, objects);
                count++;
        } while (1);
        pr_debug("Filled GGTT with %lu pages [%llu total]\n",
                 count, ggtt->vm.total / PAGE_SIZE);

        bound = 0;
        unbound = 0;
        list_for_each_entry(obj, objects, st_link) {
                GEM_BUG_ON(!obj->mm.quirked);

                if (atomic_read(&obj->bind_count))
                        bound++;
                else
                        unbound++;
        }
        GEM_BUG_ON(bound + unbound != count);

        if (unbound) {
                pr_err("%s: Found %lu objects unbound, expected %u!\n",
                       __func__, unbound, 0);
                return -EINVAL;
        }

        if (bound != count) {
                pr_err("%s: Found %lu objects bound, expected %lu!\n",
                       __func__, bound, count);
                return -EINVAL;
        }

        if (list_empty(&ggtt->vm.bound_list)) {
                pr_err("No objects on the GGTT inactive list!\n");
                return -EINVAL;
        }

        return 0;
}

static void unpin_ggtt(struct i915_ggtt *ggtt)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
                if (vma->obj->mm.quirked)
                        i915_vma_unpin(vma);
}
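/*
 * Undo quirk_add(): drop the quirk-based claim of ownership and the
 * object reference for everything on the list, then drain the freed
 * object worker so the GGTT is really empty before the next subtest.
 */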
static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
{
        struct drm_i915_gem_object *obj, *on;

        list_for_each_entry_safe(obj, on, list, st_link) {
                GEM_BUG_ON(!obj->mm.quirked);
                obj->mm.quirked = false;
                i915_gem_object_put(obj);
        }

        i915_gem_drain_freed_objects(ggtt->vm.i915);
}

static int igt_evict_something(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_ggtt *ggtt = gt->ggtt;
        LIST_HEAD(objects);
        int err;

        /* Fill the GGTT with pinned objects and try to evict one. */

        err = populate_ggtt(ggtt, &objects);
        if (err)
                goto cleanup;

        /* Everything is pinned, nothing should happen */
        mutex_lock(&ggtt->vm.mutex);
        err = i915_gem_evict_something(&ggtt->vm,
                                       I915_GTT_PAGE_SIZE, 0, 0,
                                       0, U64_MAX,
                                       0);
        mutex_unlock(&ggtt->vm.mutex);
        if (err != -ENOSPC) {
                pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
                       err);
                goto cleanup;
        }

        unpin_ggtt(ggtt);

        /* Everything is unpinned, we should be able to evict something */
        mutex_lock(&ggtt->vm.mutex);
        err = i915_gem_evict_something(&ggtt->vm,
                                       I915_GTT_PAGE_SIZE, 0, 0,
                                       0, U64_MAX,
                                       0);
        mutex_unlock(&ggtt->vm.mutex);
        if (err) {
                pr_err("i915_gem_evict_something failed on an unpinned GGTT with err=%d\n",
                       err);
                goto cleanup;
        }

cleanup:
        cleanup_objects(ggtt, &objects);
        return err;
}

static int igt_overcommit(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_ggtt *ggtt = gt->ggtt;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        LIST_HEAD(objects);
        int err;

        /* Fill the GGTT with pinned objects and then try to pin one more.
         * We expect it to fail.
         */

        err = populate_ggtt(ggtt, &objects);
        if (err)
                goto cleanup;

        obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto cleanup;
        }

        quirk_add(obj, &objects);

        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
        if (vma != ERR_PTR(-ENOSPC)) {
                pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n",
                       (int)PTR_ERR_OR_ZERO(vma));
                err = -EINVAL;
                goto cleanup;
        }

cleanup:
        cleanup_objects(ggtt, &objects);
        return err;
}
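/*
 * The target node below covers the first page of the GGTT. While the
 * occupying vma is pinned, evicting the range must fail with -ENOSPC;
 * once everything has been unpinned, it must succeed.
 */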
static int igt_evict_for_vma(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_ggtt *ggtt = gt->ggtt;
        struct drm_mm_node target = {
                .start = 0,
                .size = 4096,
        };
        LIST_HEAD(objects);
        int err;

        /* Fill the GGTT with pinned objects and try to evict a range. */

        err = populate_ggtt(ggtt, &objects);
        if (err)
                goto cleanup;

        /* Everything is pinned, nothing should happen */
        mutex_lock(&ggtt->vm.mutex);
        err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        mutex_unlock(&ggtt->vm.mutex);
        if (err != -ENOSPC) {
                pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
                       err);
                goto cleanup;
        }

        unpin_ggtt(ggtt);

        /* Everything is unpinned, we should be able to evict the node */
        mutex_lock(&ggtt->vm.mutex);
        err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        mutex_unlock(&ggtt->vm.mutex);
        if (err) {
                pr_err("i915_gem_evict_for_node returned err=%d\n",
                       err);
                goto cleanup;
        }

cleanup:
        cleanup_objects(ggtt, &objects);
        return err;
}
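/*
 * An intentionally empty color_adjust callback: merely installing it
 * makes i915_vm_has_cache_coloring() report true, which is all that
 * the cache coloring test below requires.
 */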
static void mock_color_adjust(const struct drm_mm_node *node,
                              unsigned long color,
                              u64 *start,
                              u64 *end)
{
}

static int igt_evict_for_cache_color(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_ggtt *ggtt = gt->ggtt;
        const unsigned long flags = PIN_OFFSET_FIXED;
        struct drm_mm_node target = {
                .start = I915_GTT_PAGE_SIZE * 2,
                .size = I915_GTT_PAGE_SIZE,
                .color = I915_CACHE_LLC,
        };
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        LIST_HEAD(objects);
        int err;

        /*
         * Currently the use of color_adjust for the GGTT is limited to cache
         * coloring and guard pages, and so the presence of mm.color_adjust for
         * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
         * color adjust will work just fine for our purposes.
         */
        ggtt->vm.mm.color_adjust = mock_color_adjust;
        GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));

        obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto cleanup;
        }
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
        quirk_add(obj, &objects);

        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       I915_GTT_PAGE_SIZE | flags);
        if (IS_ERR(vma)) {
                pr_err("[0]i915_gem_object_ggtt_pin failed\n");
                err = PTR_ERR(vma);
                goto cleanup;
        }

        obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto cleanup;
        }
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
        quirk_add(obj, &objects);

        /* Neighbouring; same colour - should fit */
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       (I915_GTT_PAGE_SIZE * 2) | flags);
        if (IS_ERR(vma)) {
                pr_err("[1]i915_gem_object_ggtt_pin failed\n");
                err = PTR_ERR(vma);
                goto cleanup;
        }

        i915_vma_unpin(vma);

        /* Remove just the second vma */
        mutex_lock(&ggtt->vm.mutex);
        err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        mutex_unlock(&ggtt->vm.mutex);
        if (err) {
                pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
                goto cleanup;
        }

        /* Attempt to remove the first *pinned* vma, by removing the (empty)
         * neighbour -- this should fail.
         */
        target.color = I915_CACHE_L3_LLC;

        mutex_lock(&ggtt->vm.mutex);
        err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        mutex_unlock(&ggtt->vm.mutex);
        if (!err) {
                pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
                err = -EINVAL;
                goto cleanup;
        }

        err = 0;

cleanup:
        unpin_ggtt(ggtt);
        cleanup_objects(ggtt, &objects);
        ggtt->vm.mm.color_adjust = NULL;
        return err;
}

static int igt_evict_vm(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_ggtt *ggtt = gt->ggtt;
        LIST_HEAD(objects);
        int err;

        /* Fill the GGTT with pinned objects and try to evict everything. */

        err = populate_ggtt(ggtt, &objects);
        if (err)
                goto cleanup;

        /* Everything is pinned, nothing should happen */
        mutex_lock(&ggtt->vm.mutex);
        err = i915_gem_evict_vm(&ggtt->vm);
        mutex_unlock(&ggtt->vm.mutex);
        if (err) {
                pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
                       err);
                goto cleanup;
        }

        unpin_ggtt(ggtt);

        mutex_lock(&ggtt->vm.mutex);
        err = i915_gem_evict_vm(&ggtt->vm);
        mutex_unlock(&ggtt->vm.mutex);
        if (err) {
                pr_err("i915_gem_evict_vm on an unpinned GGTT returned err=%d\n",
                       err);
                goto cleanup;
        }

cleanup:
        cleanup_objects(ggtt, &objects);
        return err;
}

static int igt_evict_contexts(void *arg)
{
        const u64 PRETEND_GGTT_SIZE = 16ull << 20;
        struct intel_gt *gt = arg;
        struct i915_ggtt *ggtt = gt->ggtt;
        struct drm_i915_private *i915 = gt->i915;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        struct reserved {
                struct drm_mm_node node;
                struct reserved *next;
        } *reserved = NULL;
        intel_wakeref_t wakeref;
        struct drm_mm_node hole;
        unsigned long count;
        int err;

        /*
         * The purpose of this test is to verify that we will trigger an
         * eviction in the GGTT when constructing a request that requires
         * additional space in the GGTT for pinning the context. This space
         * is not directly tied to the request so reclaiming it requires
         * extra work.
         *
         * As such this test is only meaningful for full-ppgtt environments
         * where the GTT space of the request is separate from the GGTT
         * allocation required to build the request.
         */
        if (!HAS_FULL_PPGTT(i915))
                return 0;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        /* Reserve a block so that we know we have enough to fit a few rq */
        memset(&hole, 0, sizeof(hole));
        mutex_lock(&ggtt->vm.mutex);
        err = i915_gem_gtt_insert(&ggtt->vm, &hole,
                                  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
                                  0, ggtt->vm.total,
                                  PIN_NOEVICT);
        if (err)
                goto out_locked;

        /* Make the GGTT appear small by filling it with unevictable nodes */
        count = 0;
        do {
                struct reserved *r;

                mutex_unlock(&ggtt->vm.mutex);
                r = kcalloc(1, sizeof(*r), GFP_KERNEL);
                mutex_lock(&ggtt->vm.mutex);
                if (!r) {
                        err = -ENOMEM;
                        goto out_locked;
                }

                if (i915_gem_gtt_insert(&ggtt->vm, &r->node,
                                        1ul << 20, 0, I915_COLOR_UNEVICTABLE,
                                        0, ggtt->vm.total,
                                        PIN_NOEVICT)) {
                        kfree(r);
                        break;
                }

                r->next = reserved;
                reserved = r;

                count++;
        } while (1);
        drm_mm_remove_node(&hole);
        mutex_unlock(&ggtt->vm.mutex);
        pr_info("Filled GGTT with %lu 1MiB nodes\n", count);

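        /*
         * Each engine now floods the shrunken GGTT with requests whose
         * submit fences are held back by an onstack fence, so every
         * request (and hence its context) stays pinned. fail_if_busy
         * makes the eviction path return -EBUSY instead of waiting,
         * letting us detect a full GGTT without hanging the selftest.
         */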
        /* Overfill the GGTT with context objects and so try to evict one. */
        for_each_engine(engine, gt, id) {
                struct i915_sw_fence fence;
                struct file *file;

                file = mock_file(i915);
                if (IS_ERR(file)) {
                        err = PTR_ERR(file);
                        break;
                }

                count = 0;
                onstack_fence_init(&fence);
                do {
                        struct i915_request *rq;
                        struct i915_gem_context *ctx;

                        ctx = live_context(i915, file);
                        if (IS_ERR(ctx))
                                break;

                        /* We will need some GGTT space for the rq's context */
                        igt_evict_ctl.fail_if_busy = true;
                        rq = igt_request_alloc(ctx, engine);
                        igt_evict_ctl.fail_if_busy = false;

                        if (IS_ERR(rq)) {
                                /* When full, fail_if_busy will trigger EBUSY */
                                if (PTR_ERR(rq) != -EBUSY) {
                                        pr_err("Unexpected error from request alloc (on %s): %d\n",
                                               engine->name,
                                               (int)PTR_ERR(rq));
                                        err = PTR_ERR(rq);
                                }
                                break;
                        }

                        /* Keep every request/ctx pinned until we are full */
                        err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                                               &fence,
                                                               GFP_KERNEL);
                        if (err < 0)
                                break;

                        i915_request_add(rq);
                        count++;
                        err = 0;
                } while (1);
                onstack_fence_fini(&fence);
                pr_info("Submitted %lu contexts/requests on %s\n",
                        count, engine->name);

                fput(file);
                if (err)
                        break;
        }

        mutex_lock(&ggtt->vm.mutex);
out_locked:
        if (igt_flush_test(i915))
                err = -EIO;
        while (reserved) {
                struct reserved *next = reserved->next;

                drm_mm_remove_node(&reserved->node);
                kfree(reserved);

                reserved = next;
        }
        if (drm_mm_node_allocated(&hole))
                drm_mm_remove_node(&hole);
        mutex_unlock(&ggtt->vm.mutex);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);

        return err;
}

int i915_gem_evict_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_evict_something),
                SUBTEST(igt_evict_for_vma),
                SUBTEST(igt_evict_for_cache_color),
                SUBTEST(igt_evict_vm),
                SUBTEST(igt_overcommit),
        };
        struct drm_i915_private *i915;
        intel_wakeref_t wakeref;
        int err = 0;

        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;

        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                err = i915_subtests(tests, &i915->gt);

        drm_dev_put(&i915->drm);
        return err;
}

int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_evict_contexts),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return intel_gt_live_subtests(tests, &i915->gt);
}