/*	$NetBSD: radeon_test.c,v 1.5 2021/12/18 23:45:43 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_test.c,v 1.5 2021/12/18 23:45:43 riastradh Exp $");

#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - rdev->gart_pin_size;
	n /= size;

	gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		/* Fill the GTT BO with its own slot addresses so every word is unique. */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.base.resv);
		else
			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.base.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i,
					  *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.base.resv);
		else
			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.base.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%"PRIx64"\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence && !IS_ERR(fence))
			radeon_fence_unref(&fence);
		break;
	}

	radeon_bo_unpin(vram_obj);
out_unres:
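	/* Unwind path for the VRAM BO: drop the reservation, then the last reference. */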
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		pr_warn("Error while testing BO move\n");
	}
}

void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

/* Emit a test fence on the given ring; UVD and VCE rings are exercised
 * through dummy create/destroy messages instead of a bare fence.
 */
static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		r = radeon_fence_emit(rdev, fence, ring->idx);
		if (r) {
			DRM_ERROR("Failed to emit fence\n");
			radeon_ring_unlock_undo(rdev, ring);
			return r;
		}
		radeon_ring_unlock_commit(rdev, ring, false);
	}
	return 0;
}

/* Test semaphore synchronization between two rings: ringA waits on a
 * semaphore twice and ringB signals it twice; each fence on ringA must
 * only signal after the corresponding signal has been emitted on ringB.
 */
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
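	/* Once committed, ringA stalls on this semaphore wait until another ring signals it. */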
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	msleep(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	msleep(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		pr_warn("Error while testing ring sync (%d)\n", r);
}

/* Test that one semaphore signal wakes exactly one of two waiting rings:
 * ringA and ringB both wait on the same semaphore, and ringC signals it
 * once for each of them.
 */
static void radeon_test_ring_sync2(struct radeon_device *rdev,
				   struct radeon_ring *ringA,
				   struct radeon_ring *ringB,
				   struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	msleep(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	for (i = 0; i < 30; ++i) {
		msleep(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fences A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
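
	/* Signal the semaphore a second time so the other waiting ring can retire its fence too. */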
	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	msleep(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		pr_warn("Error while testing ring sync (%d)\n", r);
}

/* Skip ring pairs that cannot be tested against each other (VCE2 vs. VCE1). */
static bool radeon_test_sync_possible(struct radeon_ring *ringA,
				      struct radeon_ring *ringB)
{
	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
		return false;

	return true;
}

/* Run the semaphore sync tests over every usable pair and triple of rings. */
void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}