/*	$NetBSD: radeon_test.c,v 1.1.1.2 2018/08/27 01:34:59 riastradh Exp $	*/

/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_test.c,v 1.1.1.2 2018/08/27 01:34:59 riastradh Exp $");

#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - rdev->gart_pin_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     0, NULL, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

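		/*
		 * Fill the GTT object with a self-referential pattern:
		 * each pointer-sized slot stores its own mapped address.
		 */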
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.resv);
		else
			fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

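		/*
		 * Check that every VRAM slot now holds the address of the
		 * corresponding GTT slot, then overwrite it with its own
		 * address as the pattern for the return trip.
		 */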
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
						size / RADEON_GPU_PAGE_SIZE,
						vram_obj->tbo.resv);
		else
			fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
						 size / RADEON_GPU_PAGE_SIZE,
						 vram_obj->tbo.resv);
		if (IS_ERR(fence)) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			r = PTR_ERR(fence);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

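		/*
		 * Check that the copy back landed intact: every GTT slot
		 * must hold the address of the corresponding VRAM slot.
		 * vram_map is unmapped by now, but its stale pointer values
		 * are only compared against, never dereferenced.
		 */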
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence && !IS_ERR(fence))
			radeon_fence_unref(&fence);
		break;
	}

	radeon_bo_unpin(vram_obj);
out_unres:
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}

void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}
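
/*
 * A minimal sketch of how these self-tests are reached, assuming the
 * usual "radeon_testing" module parameter; the real gating lives in
 * radeon_device.c and may differ in detail.  radeon_run_selftests() is
 * a hypothetical helper name, shown for illustration only.
 */
#if 0
static void radeon_run_selftests(struct radeon_device *rdev)
{
	if (radeon_testing & 1)		/* e.g. radeon.test=1 */
		radeon_test_moves(rdev);
	if (radeon_testing & 2)		/* e.g. radeon.test=2 */
		radeon_test_syncing(rdev);
}
#endif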

static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
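	/*
	 * Arbitrary per-ring handle, used only to pair the dummy
	 * create/destroy messages below.
	 */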
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		r = radeon_fence_emit(rdev, fence, ring->idx);
		if (r) {
			DRM_ERROR("Failed to emit fence\n");
			radeon_ring_unlock_undo(rdev, ring);
			return r;
		}
		radeon_ring_unlock_commit(rdev, ring, false);
	}
	return 0;
}

void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *ringA,
			   struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

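	/*
	 * Queue two fences on ring A, each behind a semaphore wait.
	 * Neither may signal until ring B signals the semaphore, once
	 * per wait.
	 */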
	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

static void radeon_test_ring_sync2(struct radeon_device *rdev,
			    struct radeon_ring *ringA,
			    struct radeon_ring *ringB,
			    struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

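	/*
	 * Block rings A and B on the same semaphore, then signal it
	 * twice from ring C.  Each signal should release exactly one
	 * waiter, in either order.
	 */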
	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA, false);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB, false);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %d\n", ringC->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor fence B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fences A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %d\n", ringC->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC, false);

	mdelay(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

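/*
 * Not every ring pair can be synchronized: a VCE2 semaphore wait paired
 * with a VCE1 signal is excluded here.
 */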
static bool radeon_test_sync_possible(struct radeon_ring *ringA,
				      struct radeon_ring *ringB)
{
	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
		return false;

	return true;
}

void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

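	/* Test every ready ring pair in both directions, and every
	 * ready ring triple in all six orderings. */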
	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}
    580