/* $NetBSD: radeon_test.c,v 1.5 2021/12/18 23:45:43 riastradh Exp $ */

// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_test.c,v 1.5 2021/12/18 23:45:43 riastradh Exp $");

#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - rdev->gart_pin_size;
	n /= size;

	gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     0, NULL, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);
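
		/*
		 * Copy the pattern to VRAM with the engine under test,
		 * then map the VRAM object and verify it word by word.
		 */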
		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.base.resv);
		else
			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.base.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.base.resv);
		else
			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.base.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%"PRIx64"\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
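
		/*
		 * A step for object i failed: the labels above undid the
		 * current object; now unwind every GTT object created in
		 * earlier iterations before bailing out.
		 */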
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence && !IS_ERR(fence))
			radeon_fence_unref(&fence);
		break;
	}

	radeon_bo_unpin(vram_obj);
out_unres:
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		pr_warn("Error while testing BO move\n");
	}
}

void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		r = radeon_fence_emit(rdev, fence, ring->idx);
		if (r) {
			DRM_ERROR("Failed to emit fence\n");
			radeon_ring_unlock_undo(rdev, ring);
			return r;
		}
		radeon_ring_unlock_commit(rdev, ring, false);
	}
	return 0;
}

void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	msleep(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
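
	/*
	 * Signal the semaphore once from ring B; only the first of the
	 * two waits queued on ring A may proceed, so fence 1 should
	 * complete while fence 2 stays pending.
	 */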
	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	msleep(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		pr_warn("Error while testing ring sync (%d)\n", r);
}

static void radeon_test_ring_sync2(struct radeon_device *rdev,
				   struct radeon_ring *ringA,
				   struct radeon_ring *ringB,
				   struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	msleep(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);
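
	/*
	 * A single signal from ring C should release exactly one of the
	 * two waiters; poll for up to three seconds to see which fence
	 * completes first.
	 */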
	for (i = 0; i < 30; ++i) {
		msleep(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fences A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	msleep(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		pr_warn("Error while testing ring sync (%d)\n", r);
}

static bool radeon_test_sync_possible(struct radeon_ring *ringA,
				      struct radeon_ring *ringB)
{
	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
		return false;

	return true;
}

void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}