/*	$NetBSD: i915_buddy.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_buddy.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#define SZ_8G (1ULL << 33)

static void __igt_dump_block(struct i915_buddy_mm *mm,
			     struct i915_buddy_block *block,
			     bool buddy)
{
	pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
	       block->header,
	       i915_buddy_block_state(block),
	       i915_buddy_block_order(block),
	       i915_buddy_block_offset(block),
	       i915_buddy_block_size(mm, block),
	       yesno(!block->parent),
	       yesno(buddy));
}

static void igt_dump_block(struct i915_buddy_mm *mm,
			   struct i915_buddy_block *block)
{
	struct i915_buddy_block *buddy;

	__igt_dump_block(mm, block, false);

	buddy = get_buddy(block);
	if (buddy)
		__igt_dump_block(mm, buddy, true);
}

static int igt_check_block(struct i915_buddy_mm *mm,
			   struct i915_buddy_block *block)
{
	struct i915_buddy_block *buddy;
	unsigned int block_state;
	u64 block_size;
	u64 offset;
	int err = 0;

	block_state = i915_buddy_block_state(block);

	if (block_state != I915_BUDDY_ALLOCATED &&
	    block_state != I915_BUDDY_FREE &&
	    block_state != I915_BUDDY_SPLIT) {
		pr_err("block state mismatch\n");
		err = -EINVAL;
	}

	block_size = i915_buddy_block_size(mm, block);
	offset = i915_buddy_block_offset(block);

	if (block_size < mm->chunk_size) {
		pr_err("block size smaller than min size\n");
		err = -EINVAL;
	}

	if (!is_power_of_2(block_size)) {
		pr_err("block size not power of two\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(block_size, mm->chunk_size)) {
		pr_err("block size not aligned to min size\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(offset, mm->chunk_size)) {
		pr_err("block offset not aligned to min size\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(offset, block_size)) {
		pr_err("block offset not aligned to block size\n");
		err = -EINVAL;
	}

	buddy = get_buddy(block);

	if (!buddy && block->parent) {
		pr_err("buddy has gone fishing\n");
		err = -EINVAL;
	}

	if (buddy) {
		if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
			pr_err("buddy has wrong offset\n");
			err = -EINVAL;
		}

		if (i915_buddy_block_size(mm, buddy) != block_size) {
			pr_err("buddy size mismatch\n");
			err = -EINVAL;
		}

		if (i915_buddy_block_state(buddy) == block_state &&
		    block_state == I915_BUDDY_FREE) {
			pr_err("block and its buddy are free\n");
			err = -EINVAL;
		}
	}

	return err;
}

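/*
 * Validate every block on @blocks with igt_check_block(), confirm each one
 * is in the allocated state and, when @is_contiguous is set, that successive
 * blocks abut one another. The block sizes must sum to @expected_size.
 */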
static int igt_check_blocks(struct i915_buddy_mm *mm,
			    struct list_head *blocks,
			    u64 expected_size,
			    bool is_contiguous)
{
	struct i915_buddy_block *block;
	struct i915_buddy_block *prev;
	u64 total;
	int err = 0;

	block = NULL;
	prev = NULL;
	total = 0;

	list_for_each_entry(block, blocks, link) {
		err = igt_check_block(mm, block);

		if (!i915_buddy_block_is_allocated(block)) {
			pr_err("block not allocated\n");
			err = -EINVAL;
		}

		if (is_contiguous && prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = i915_buddy_block_offset(prev);
			prev_block_size = i915_buddy_block_size(mm, prev);
			offset = i915_buddy_block_offset(block);

			if (offset != (prev_offset + prev_block_size)) {
				pr_err("block offset mismatch\n");
				err = -EINVAL;
			}
		}

		if (err)
			break;

		total += i915_buddy_block_size(mm, block);
		prev = block;
	}

	if (!err) {
		if (total != expected_size) {
			pr_err("size mismatch, expected=%llx, found=%llx\n",
			       expected_size, total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		pr_err("prev block, dump:\n");
		igt_dump_block(mm, prev);
	}

	if (block) {
		pr_err("bad block, dump:\n");
		igt_dump_block(mm, block);
	}

	return err;
}

static int igt_check_mm(struct i915_buddy_mm *mm)
{
	struct i915_buddy_block *root;
	struct i915_buddy_block *prev;
	unsigned int i;
	u64 total;
	int err = 0;

	if (!mm->n_roots) {
		pr_err("n_roots is zero\n");
		return -EINVAL;
	}

	if (mm->n_roots != hweight64(mm->size)) {
		pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
		       mm->n_roots, hweight64(mm->size));
		return -EINVAL;
	}

	root = NULL;
	prev = NULL;
	total = 0;

	for (i = 0; i < mm->n_roots; ++i) {
		struct i915_buddy_block *block;
		unsigned int order;

		root = mm->roots[i];
		if (!root) {
			pr_err("root(%u) is NULL\n", i);
			err = -EINVAL;
			break;
		}

		err = igt_check_block(mm, root);

		if (!i915_buddy_block_is_free(root)) {
			pr_err("root not free\n");
			err = -EINVAL;
		}

		order = i915_buddy_block_order(root);

		if (!i) {
			if (order != mm->max_order) {
				pr_err("max order root missing\n");
				err = -EINVAL;
			}
		}

		if (prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = i915_buddy_block_offset(prev);
			prev_block_size = i915_buddy_block_size(mm, prev);
			offset = i915_buddy_block_offset(root);

			if (offset != (prev_offset + prev_block_size)) {
				pr_err("root offset mismatch\n");
				err = -EINVAL;
			}
		}

		block = list_first_entry_or_null(&mm->free_list[order],
						 struct i915_buddy_block,
						 link);
		if (block != root) {
			pr_err("root mismatch at order=%u\n", order);
			err = -EINVAL;
		}

		if (err)
			break;

		prev = root;
		total += i915_buddy_block_size(mm, root);
	}

	if (!err) {
		if (total != mm->size) {
			pr_err("expected mm size=%llx, found=%llx\n", mm->size,
			       total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		pr_err("prev root(%u), dump:\n", i - 1);
		igt_dump_block(mm, prev);
	}

	if (root) {
		pr_err("bad root(%u), dump:\n", i);
		igt_dump_block(mm, root);
	}

	return err;
}

static void igt_mm_config(u64 *size, u64 *chunk_size)
{
	I915_RND_STATE(prng);
	u64 s, ms;

	/* Nothing fancy, just try to get an interesting bit pattern */

	prandom_seed_state(&prng, i915_selftest.random_seed);

	s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
	ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
	s = max(s & -ms, ms);

	*chunk_size = ms;
	*size = s;
}

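/*
 * Smoke test: with a randomly sized mm from igt_mm_config(), repeatedly fill
 * the whole space starting from each possible order (falling back to smaller
 * orders as the space fragments), verifying the blocks we get back and the
 * state of the free lists before and after every pass.
 */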
static int igt_buddy_alloc_smoke(void *arg)
{
	struct i915_buddy_mm mm;
	int max_order;
	u64 chunk_size;
	u64 mm_size;
	int err;

	igt_mm_config(&mm_size, &chunk_size);

	pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);

	err = i915_buddy_init(&mm, mm_size, chunk_size);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}

	for (max_order = mm.max_order; max_order >= 0; max_order--) {
		struct i915_buddy_block *block;
		int order;
		LIST_HEAD(blocks);
		u64 total;

		err = igt_check_mm(&mm);
		if (err) {
			pr_err("pre-mm check failed, abort\n");
			break;
		}

		pr_info("filling from max_order=%u\n", max_order);

		order = max_order;
		total = 0;

		do {
retry:
			block = i915_buddy_alloc(&mm, order);
			if (IS_ERR(block)) {
				err = PTR_ERR(block);
				if (err == -ENOMEM) {
					pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
						order);
				} else {
					if (order--) {
						err = 0;
						goto retry;
					}

					pr_err("buddy_alloc with order=%d failed(%d)\n",
					       order, err);
				}

				break;
			}

			list_add_tail(&block->link, &blocks);

			if (i915_buddy_block_order(block) != order) {
				pr_err("buddy_alloc order mismatch\n");
				err = -EINVAL;
				break;
			}

			total += i915_buddy_block_size(&mm, block);
		} while (total < mm.size);

		if (!err)
			err = igt_check_blocks(&mm, &blocks, total, false);

		i915_buddy_free_list(&mm, &blocks);

		if (!err) {
			err = igt_check_mm(&mm);
			if (err)
				pr_err("post-mm check failed\n");
		}

		if (err)
			break;

		cond_resched();
	}

	if (err == -ENOMEM)
		err = 0;

	i915_buddy_fini(&mm);

	return err;
}

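/*
 * Note the arithmetic below: one block of each order 0..max_order-1 covers
 * 2^max_order - 1 pages out of the 2^max_order page mm, so exactly one page
 * is expected to remain before the final order-0 allocation.
 */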
static int igt_buddy_alloc_pessimistic(void *arg)
{
	const unsigned int max_order = 16;
	struct i915_buddy_block *block, *bn;
	struct i915_buddy_mm mm;
	unsigned int order;
	LIST_HEAD(blocks);
	int err;

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}
	GEM_BUG_ON(mm.max_order != max_order);

	for (order = 0; order < max_order; order++) {
		block = i915_buddy_alloc(&mm, order);
		if (IS_ERR(block)) {
			pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
				order);
			err = PTR_ERR(block);
			goto err;
		}

		list_add_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	block = i915_buddy_alloc(&mm, 0);
	if (IS_ERR(block)) {
		pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
		err = PTR_ERR(block);
		goto err;
	}
	list_add_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--; ) {
		block = i915_buddy_alloc(&mm, order);
		if (!IS_ERR(block)) {
			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				order);
			list_add_tail(&block->link, &blocks);
			err = -EINVAL;
			goto err;
		}
	}

	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	i915_buddy_free(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		i915_buddy_free(&mm, block);

		block = i915_buddy_alloc(&mm, order);
		if (IS_ERR(block)) {
			pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
				order);
			err = PTR_ERR(block);
			goto err;
		}
		i915_buddy_free(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	block = i915_buddy_alloc(&mm, max_order);
	if (IS_ERR(block)) {
		pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			max_order);
		err = PTR_ERR(block);
		goto err;
	}
	i915_buddy_free(&mm, block);

err:
	i915_buddy_free_list(&mm, &blocks);
	i915_buddy_fini(&mm);
	return err;
}

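/*
 * Sizing note: an mm of PAGE_SIZE * (2^(max_order+1) - 1) decomposes into
 * exactly one root block of each order from max_order down to 0, so one
 * allocation per order should consume the mm completely.
 */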
static int igt_buddy_alloc_optimistic(void *arg)
{
	const int max_order = 16;
	struct i915_buddy_block *block;
	struct i915_buddy_mm mm;
	LIST_HEAD(blocks);
	int order;
	int err;

	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */

	err = i915_buddy_init(&mm,
			      PAGE_SIZE * ((1 << (max_order + 1)) - 1),
			      PAGE_SIZE);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}
	GEM_BUG_ON(mm.max_order != max_order);

	for (order = 0; order <= max_order; order++) {
		block = i915_buddy_alloc(&mm, order);
		if (IS_ERR(block)) {
			pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
				order);
			err = PTR_ERR(block);
			goto err;
		}

		list_add_tail(&block->link, &blocks);
	}

	/* Should be completely full! */
	block = i915_buddy_alloc(&mm, 0);
	if (!IS_ERR(block)) {
		pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
		list_add_tail(&block->link, &blocks);
		err = -EINVAL;
		goto err;
	}

err:
	i915_buddy_free_list(&mm, &blocks);
	i915_buddy_fini(&mm);
	return err;
}

static int igt_buddy_alloc_pathological(void *arg)
{
	const int max_order = 16;
	struct i915_buddy_block *block;
	struct i915_buddy_mm mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	int order, top;
	int err;

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

	err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}
	GEM_BUG_ON(mm.max_order != max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			i915_buddy_free(&mm, block);
		}

		for (order = top; order--; ) {
			block = i915_buddy_alloc(&mm, order);
			if (IS_ERR(block)) {
				pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					order, top);
				err = PTR_ERR(block);
				goto err;
			}
			list_add_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		block = i915_buddy_alloc(&mm, 0);
		if (IS_ERR(block)) {
			pr_info("buddy_alloc hit -ENOMEM for hole\n");
			err = PTR_ERR(block);
			goto err;
		}
		list_add_tail(&block->link, &holes);

		block = i915_buddy_alloc(&mm, top);
		if (!IS_ERR(block)) {
			pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				top, max_order);
			list_add_tail(&block->link, &blocks);
			err = -EINVAL;
			goto err;
		}
	}

	i915_buddy_free_list(&mm, &holes);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		block = i915_buddy_alloc(&mm, order);
		if (!IS_ERR(block)) {
			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				order);
			list_add_tail(&block->link, &blocks);
			err = -EINVAL;
			goto err;
		}
	}

err:
	list_splice_tail(&holes, &blocks);
	i915_buddy_free_list(&mm, &blocks);
	i915_buddy_fini(&mm);
	return err;
}

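/*
 * Exercise i915_buddy_alloc_range(): carve the mm into contiguous ranges
 * whose lengths step through prime multiples of chunk_size, checking that
 * each returned list starts at the requested offset and is contiguous.
 */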
static int igt_buddy_alloc_range(void *arg)
{
	struct i915_buddy_mm mm;
	unsigned long page_num;
	LIST_HEAD(blocks);
	u64 chunk_size;
	u64 offset;
	u64 size;
	u64 rem;
	int err;

	igt_mm_config(&size, &chunk_size);

	pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);

	err = i915_buddy_init(&mm, size, chunk_size);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}

	err = igt_check_mm(&mm);
	if (err) {
		pr_err("pre-mm check failed, abort, abort, abort!\n");
		goto err_fini;
	}

	rem = mm.size;
	offset = 0;

	for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
		struct i915_buddy_block *block;
		LIST_HEAD(tmp);

		size = min(page_num * mm.chunk_size, rem);

		err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("alloc_range hit -ENOMEM with size=%llx\n",
					size);
			} else {
				pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
				       offset, size, err);
			}

			break;
		}

		block = list_first_entry_or_null(&tmp,
						 struct i915_buddy_block,
						 link);
		if (!block) {
			pr_err("alloc_range has no blocks\n");
			err = -EINVAL;
			break;
		}

		if (i915_buddy_block_offset(block) != offset) {
			pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
			       i915_buddy_block_offset(block), offset);
			err = -EINVAL;
		}

		if (!err)
			err = igt_check_blocks(&mm, &tmp, size, true);

		list_splice_tail(&tmp, &blocks);

		if (err)
			break;

		offset += size;

		rem -= size;
		if (!rem)
			break;

		cond_resched();
	}

	if (err == -ENOMEM)
		err = 0;

	i915_buddy_free_list(&mm, &blocks);

	if (!err) {
		err = igt_check_mm(&mm);
		if (err)
			pr_err("post-mm check failed\n");
	}

err_fini:
	i915_buddy_fini(&mm);

	return err;
}

int i915_buddy_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_buddy_alloc_pessimistic),
		SUBTEST(igt_buddy_alloc_optimistic),
		SUBTEST(igt_buddy_alloc_pathological),
		SUBTEST(igt_buddy_alloc_smoke),
		SUBTEST(igt_buddy_alloc_range),
	};

	return i915_subtests(tests, NULL);
}