Lines matching refs:rdev (the number at the start of each line is the source file's own line number)
69 * @rdev: radeon_device pointer
75 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
77 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
78 if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
90 * @rdev: radeon_device pointer
96 static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
98 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
101 if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
116 * @rdev: radeon_device pointer
121 static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
128 &rdev->fence_drv[ring].lockup_work,
135 * @rdev: radeon_device pointer
142 int radeon_fence_emit(struct radeon_device *rdev,
153 (*fence)->rdev = rdev;
154 (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
158 &rdev->fence_lock,
159 rdev->fence_context + ring,
161 radeon_fence_ring_emit(rdev, ring, *fence);
162 trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
163 radeon_fence_schedule_check(rdev, ring);
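In radeon_fence_emit() each new fence takes the next value of the per-ring counter with ++sync_seq[ring], the fence packet is written to the ring, and the lockup check is rearmed. A stripped-down sketch of just the sequence bookkeeping, with illustrative stand-in types rather than the driver's.

#include <stdint.h>

#define NUM_RINGS_MODEL 8   /* stand-in for RADEON_NUM_RINGS */

/* Illustrative per-ring bookkeeping: last fence emitted (per ring) and
 * last fence seen signaled. */
struct ring_counters_model {
    uint64_t sync_seq[NUM_RINGS_MODEL];
    uint64_t last_seq;
};

struct fence_model {
    uint64_t seq;
    int ring;
};

/* drv points at an array of NUM_RINGS_MODEL per-ring structures.  Each
 * emitted fence gets the next monotonically increasing sequence number
 * of its ring; it counts as signaled once the hardware has written a
 * value >= seq back to the fence location.  The driver then emits the
 * fence packet (radeon_fence_ring_emit) and rearms the lockup check. */
static void fence_emit_model(struct ring_counters_model *drv,
                             struct fence_model *fence, int ring)
{
    fence->ring = ring;
    fence->seq = ++drv[ring].sync_seq[ring];
}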
178 BUG_ON(!spin_is_locked(&fence->rdev->fence_lock));
184 seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
193 radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
194 TAILQ_REMOVE(&fence->rdev->fence_check, fence, fence_check);
202 radeon_fence_wakeup_locked(struct radeon_device *rdev)
206 BUG_ON(!spin_is_locked(&rdev->fence_lock));
207 DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue, &rdev->fence_lock);
208 TAILQ_FOREACH_SAFE(fence, &rdev->fence_check, fence_check, next) {
216 * @rdev: radeon_device pointer
223 static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
229 BUG_ON(!spin_is_locked(&rdev->fence_lock));
252 last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
254 last_emitted = rdev->fence_drv[ring].sync_seq[ring];
255 seq = radeon_fence_read(rdev, ring);
279 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
282 radeon_fence_schedule_check(rdev, ring);
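radeon_fence_activity() reads a 32-bit value back from the fence location and widens it to the driver's 64-bit sequence space using the last signaled (last_seq) and last emitted (sync_seq) values, so a wrap of the hardware counter is not mistaken for the counter running backwards; the atomic64_xchg() loop then publishes the new value. A self-contained sketch of the widening step, assuming the same invariants as the driver (64-bit sequences never wrap, last_seq <= last_emitted).

#include <stdint.h>

/* Widen a 32-bit hardware fence value to 64 bits.  last_seq is the
 * last 64-bit value known signaled, last_emitted the last 64-bit value
 * emitted. */
static uint64_t extend_fence_seq(uint32_t hw_seq, uint64_t last_seq,
                                 uint64_t last_emitted)
{
    /* Start with the upper 32 bits of the last signaled value. */
    uint64_t seq = (uint64_t)hw_seq | (last_seq & 0xffffffff00000000ULL);

    /* Going backwards means the 32-bit counter wrapped: borrow the
     * upper bits of the newest emitted value instead. */
    if (seq < last_seq)
        seq = (uint64_t)hw_seq | (last_emitted & 0xffffffff00000000ULL);

    /* Ignore values outside (last_seq, last_emitted]: either nothing
     * new has signaled or the read raced with a fresh emission. */
    if (seq <= last_seq || seq > last_emitted)
        return last_seq;
    return seq;
}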
298 struct radeon_device *rdev;
303 rdev = fence_drv->rdev;
304 ring = fence_drv - &rdev->fence_drv[0];
306 spin_lock(&rdev->fence_lock);
308 if (!down_read_trylock(&rdev->exclusive_lock)) {
310 radeon_fence_schedule_check(rdev, ring);
311 spin_unlock(&rdev->fence_lock);
315 if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
319 spin_lock_irqsave(&rdev->irq.lock, irqflags);
320 radeon_irq_set(rdev);
321 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
324 if (radeon_fence_activity(rdev, ring))
325 radeon_fence_wakeup_locked(rdev);
327 else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
330 dev_warn(rdev->dev, "GPU lockup (current fence id "
336 rdev->needs_reset = true;
337 radeon_fence_wakeup_locked(rdev);
339 up_read(&rdev->exclusive_lock);
340 spin_unlock(&rdev->fence_lock);
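The lockup work handler above reschedules itself when it cannot take exclusive_lock (a GPU reset is likely in progress), wakes waiters when radeon_fence_activity() reports progress, and otherwise flags rdev->needs_reset when the ring looks locked up. The decision logic, reduced to an illustrative pure function with the locking, the delayed-interrupt reprogramming and the hardware queries abstracted into booleans.

#include <stdbool.h>

enum lockup_action {
    LOCKUP_RESCHEDULE,  /* exclusive_lock busy: check again later       */
    LOCKUP_WAKE,        /* new fence activity: wake waiters             */
    LOCKUP_RESET,       /* no activity and the ring looks locked up     */
    LOCKUP_NONE         /* nothing signaled, but the ring is still alive */
};

static enum lockup_action lockup_check_model(bool got_exclusive_lock,
                                             bool fence_activity,
                                             bool ring_locked_up)
{
    if (!got_exclusive_lock)
        return LOCKUP_RESCHEDULE;
    if (fence_activity)
        return LOCKUP_WAKE;
    if (ring_locked_up)
        return LOCKUP_RESET;   /* the driver sets rdev->needs_reset here */
    return LOCKUP_NONE;
}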
346 * @rdev: radeon_device pointer
352 static void radeon_fence_process_locked(struct radeon_device *rdev, int ring)
354 if (radeon_fence_activity(rdev, ring))
355 radeon_fence_wakeup_locked(rdev);
358 void radeon_fence_process(struct radeon_device *rdev, int ring)
361 spin_lock(&rdev->fence_lock);
362 radeon_fence_process_locked(rdev, ring);
363 spin_unlock(&rdev->fence_lock);
369 * @rdev: radeon device pointer
380 static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
383 BUG_ON(!spin_is_locked(&rdev->fence_lock));
384 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
388 radeon_fence_process_locked(rdev, ring);
389 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
398 struct radeon_device *rdev = fence->rdev;
402 BUG_ON(!spin_is_locked(&rdev->fence_lock));
404 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
408 if (down_read_trylock(&rdev->exclusive_lock)) {
409 radeon_fence_process_locked(rdev, ring);
410 up_read(&rdev->exclusive_lock);
412 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
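Both radeon_fence_seq_signaled() and the dma-fence signaled callback follow the same check/poll/re-check pattern: a fence is signaled once last_seq has reached its sequence number, and the hardware is only polled (radeon_fence_process_locked()) when the cached value is not yet far enough. A hedged sketch of that pattern; the poll hook is a stand-in for the driver's processing path.

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the hardware poll; in the driver this is
 * radeon_fence_process_locked(), which re-reads the fence value and
 * advances last_seq. */
typedef uint64_t (*poll_fn)(void *ctx);

/* Check the cached value first and only poll the hardware when it is
 * not far enough yet. */
static bool seq_signaled_model(uint64_t last_seq, uint64_t seq,
                               poll_fn poll, void *ctx)
{
    if (last_seq >= seq)
        return true;
    return poll(ctx) >= seq;
}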
430 struct radeon_device *rdev = fence->rdev;
432 BUG_ON(!spin_is_locked(&rdev->fence_lock));
434 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
437 if (down_read_trylock(&rdev->exclusive_lock)) {
438 radeon_irq_kms_sw_irq_get(rdev, fence->ring);
440 if (radeon_fence_activity(rdev, fence->ring))
441 radeon_fence_wakeup_locked(rdev);
444 if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
445 radeon_irq_kms_sw_irq_put(rdev, fence->ring);
446 up_read(&rdev->exclusive_lock);
450 up_read(&rdev->exclusive_lock);
453 if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
454 rdev->fence_drv[fence->ring].delayed_irq = true;
455 radeon_fence_schedule_check(rdev, fence->ring);
458 TAILQ_INSERT_TAIL(&rdev->fence_check, fence, fence_check);
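The enable-signaling path keeps a software interrupt referenced for the ring and queues the fence on rdev->fence_check; the wakeup path later walks that list and completes every entry whose sequence number has been reached, dropping the interrupt reference again (see the radeon_irq_kms_sw_irq_put() line earlier in the listing). An illustrative model of that pending list using the same TAILQ macros; the real code also runs the dma-fence callbacks when it removes an entry.

#include <stdbool.h>
#include <stdint.h>
#include <sys/queue.h>

/* Illustrative pending-fence list mirroring rdev->fence_check. */
struct pending_fence {
    uint64_t seq;
    TAILQ_ENTRY(pending_fence) entry;
};
TAILQ_HEAD(pending_list, pending_fence);

/* Returns true if signaling was armed (fence queued), false if the
 * fence had already signaled and nothing needs to be tracked. */
static bool enable_signaling_model(struct pending_list *list,
                                   struct pending_fence *fence,
                                   uint64_t last_seq)
{
    if (last_seq >= fence->seq)
        return false;
    TAILQ_INSERT_TAIL(list, fence, entry);
    return true;
}

/* Wakeup path: drop every queued fence that has now signaled. */
static void wakeup_model(struct pending_list *list, uint64_t last_seq)
{
    struct pending_fence *f = TAILQ_FIRST(list);

    while (f != NULL) {
        struct pending_fence *next = TAILQ_NEXT(f, entry);

        if (last_seq >= f->seq)
            TAILQ_REMOVE(list, f, entry);
        f = next;
    }
}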
478 spin_lock(&fence->rdev->fence_lock);
479 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
485 spin_unlock(&fence->rdev->fence_lock);
488 spin_unlock(&fence->rdev->fence_lock);
495 * @rdev: radeon device pointer
503 static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
507 BUG_ON(!spin_is_locked(&rdev->fence_lock));
510 if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
519 * @rdev: radeon device pointer
533 static long radeon_fence_wait_seq_timeout_locked(struct radeon_device *rdev,
540 if (radeon_fence_any_seq_signaled(rdev, target_seq))
548 trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
549 radeon_irq_kms_sw_irq_get(rdev, i);
553 DRM_SPIN_TIMED_WAIT_UNTIL(r, &rdev->fence_queue,
554 &rdev->fence_lock, timeout,
555 (radeon_fence_any_seq_signaled(rdev, target_seq)
556 || rdev->needs_reset));
558 DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(r, &rdev->fence_queue,
559 &rdev->fence_lock, timeout,
560 (radeon_fence_any_seq_signaled(rdev, target_seq)
561 || rdev->needs_reset));
563 if (rdev->needs_reset)
570 radeon_irq_kms_sw_irq_put(rdev, i);
571 trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
577 static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
582 spin_lock(&rdev->fence_lock);
583 r = radeon_fence_wait_seq_timeout_locked(rdev, target_seq, intr, timo);
584 spin_unlock(&rdev->fence_lock);
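radeon_fence_wait_seq_timeout_locked() references the software interrupt of every ring it waits on, sleeps on rdev->fence_queue under rdev->fence_lock until any requested sequence number signals or rdev->needs_reset is set, then drops the interrupt references. The sketch below shows the exit conditions only, with the DRM_SPIN_TIMED_WAIT_UNTIL() sleep reduced to a bounded poll; it is a schematic, not the driver's wait primitive.

#include <stdbool.h>
#include <stdint.h>

#define NUM_RINGS_MODEL 8   /* stand-in for RADEON_NUM_RINGS */

/* Wait predicate: done as soon as any ring with a non-zero target has
 * signaled up to that target (cf. radeon_fence_any_seq_signaled). */
static bool any_seq_signaled_model(const uint64_t last_seq[NUM_RINGS_MODEL],
                                   const uint64_t target[NUM_RINGS_MODEL])
{
    for (int i = 0; i < NUM_RINGS_MODEL; i++) {
        if (target[i] && last_seq[i] >= target[i])
            return true;
    }
    return false;
}

/* Exit conditions of the wait.  A zero return means the wait timed
 * out; a negative one means a GPU reset was requested (the driver
 * returns -EDEADLK there). */
static long wait_seq_model(bool (*signaled)(void *), void *ctx,
                           const bool *needs_reset, long timeout_ticks)
{
    while (timeout_ticks > 0) {
        if (*needs_reset)
            return -1;
        if (signaled(ctx))
            return timeout_ticks;   /* remaining time, like the real call */
        timeout_ticks--;            /* stand-in for sleeping one tick */
    }
    return 0;
}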
618 r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
653 * @rdev: radeon device pointer
663 int radeon_fence_wait_any(struct radeon_device *rdev,
686 r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
696 * @rdev: radeon device pointer
703 int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
708 seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
709 if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
714 r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
723 * @rdev: radeon device pointer
730 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
735 seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
739 r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
744 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
784 * @rdev: radeon device pointer
791 unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
798 radeon_fence_process(rdev, ring);
799 emitted = rdev->fence_drv[ring].sync_seq[ring]
800 - atomic64_read(&rdev->fence_drv[ring].last_seq);
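radeon_fence_wait_next(), radeon_fence_wait_empty() and radeon_fence_count_emitted() all derive their targets from the same two per-ring counters: the next fence is last_seq + 1, an idle ring means last_seq has reached sync_seq[ring], and the number of outstanding fences is their difference. A short illustrative recap (stand-in struct, not the driver's).

#include <stdint.h>

/* Illustrative per-ring counters: last fence emitted and last fence
 * seen signaled. */
struct ring_seq_model {
    uint64_t sync_seq;   /* last emitted  */
    uint64_t last_seq;   /* last signaled */
};

/* radeon_fence_wait_next() targets the very next fence; if this
 * exceeds sync_seq there is nothing to wait for. */
static uint64_t wait_next_target(const struct ring_seq_model *r)
{
    return r->last_seq + 1;
}

/* radeon_fence_wait_empty() targets the last fence emitted. */
static uint64_t wait_empty_target(const struct ring_seq_model *r)
{
    return r->sync_seq;
}

/* Outstanding fences are the difference; the driver also caps the
 * result at 0x10000000. */
static uint64_t count_emitted_model(const struct ring_seq_model *r)
{
    uint64_t emitted = r->sync_seq - r->last_seq;

    if (emitted > 0x10000000ULL)
        emitted = 0x10000000ULL;
    return emitted;
}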
832 fdrv = &fence->rdev->fence_drv[dst_ring];
863 src = &fence->rdev->fence_drv[fence->ring];
864 dst = &fence->rdev->fence_drv[dst_ring];
877 * @rdev: radeon device pointer
885 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
890 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
891 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
892 rdev->fence_drv[ring].scratch_reg = 0;
895 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
896 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
901 index = ALIGN(rdev->uvd_fw->size, 8);
902 rdev->fence_drv[ring].cpu_addr = (uint32_t *)((uint8_t *)rdev->uvd.cpu_addr + index);
903 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
907 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
909 dev_err(rdev->dev, "fence failed to get scratch register\n");
913 rdev->fence_drv[ring].scratch_reg -
914 rdev->scratch.reg_base;
915 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
916 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
918 radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
919 rdev->fence_drv[ring].initialized = true;
920 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016"PRIx64" and cpu addr 0x%p\n",
921 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
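radeon_fence_driver_start_ring() either carves the fence location out of the writeback buffer (or out of the UVD area for the UVD ring) or falls back to a scratch register, and in every case ends up with a matched cpu_addr/gpu_addr pair pointing at the same dword. A sketch of that pairing, assuming an illustrative writeback layout; the driver's offset constants are left out.

#include <stdint.h>

/* Illustrative writeback buffer: one CPU mapping plus its GPU address. */
struct wb_model {
    uint32_t *cpu_base;
    uint64_t  gpu_base;
};

struct fence_slot {
    uint32_t *cpu_addr;   /* where the CPU reads the fence value  */
    uint64_t  gpu_addr;   /* where the GPU writes the fence value */
};

/* Both views must name the same dword: the CPU side is indexed in
 * 32-bit words (index/4 in the driver), the GPU side in bytes. */
static struct fence_slot fence_slot_at(const struct wb_model *wb,
                                       unsigned int byte_index)
{
    struct fence_slot slot;

    slot.cpu_addr = &wb->cpu_base[byte_index / 4];
    slot.gpu_addr = wb->gpu_base + byte_index;
    return slot;
}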
929 * @rdev: radeon device pointer
935 static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
939 rdev->fence_drv[ring].scratch_reg = -1;
940 rdev->fence_drv[ring].cpu_addr = NULL;
941 rdev->fence_drv[ring].gpu_addr = 0;
943 rdev->fence_drv[ring].sync_seq[i] = 0;
944 atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
945 rdev->fence_drv[ring].initialized = false;
946 INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
948 rdev->fence_drv[ring].rdev = rdev;
955 * @rdev: radeon device pointer
963 int radeon_fence_driver_init(struct radeon_device *rdev)
967 spin_lock_init(&rdev->fence_lock);
968 DRM_INIT_WAITQUEUE(&rdev->fence_queue, "radfence");
969 TAILQ_INIT(&rdev->fence_check);
971 radeon_fence_driver_init_ring(rdev, ring);
973 if (radeon_debugfs_fence_init(rdev)) {
974 dev_err(rdev->dev, "fence debugfs file creation failed\n");
983 * @rdev: radeon device pointer
987 void radeon_fence_driver_fini(struct radeon_device *rdev)
991 mutex_lock(&rdev->ring_lock);
993 if (!rdev->fence_drv[ring].initialized)
995 r = radeon_fence_wait_empty(rdev, ring);
998 radeon_fence_driver_force_completion(rdev, ring);
1000 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
1001 spin_lock(&rdev->fence_lock);
1002 radeon_fence_wakeup_locked(rdev);
1003 spin_unlock(&rdev->fence_lock);
1004 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
1005 rdev->fence_drv[ring].initialized = false;
1007 mutex_unlock(&rdev->ring_lock);
1009 BUG_ON(!TAILQ_EMPTY(&rdev->fence_check));
1010 DRM_DESTROY_WAITQUEUE(&rdev->fence_queue);
1011 spin_lock_destroy(&rdev->fence_lock);
1017 * @rdev: radeon device pointer
1023 void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
1025 if (rdev->fence_drv[ring].initialized) {
1026 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
1027 cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
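radeon_fence_driver_force_completion() writes the last emitted sequence number straight into the fence location, so every outstanding fence on a dead ring reads as signaled, and then cancels the lockup work. A one-line illustrative model.

#include <stdint.h>

/* Fence values are 32-bit on the hardware side, so only the low word
 * of the 64-bit sequence number is written. */
static void force_completion_model(uint32_t *fence_slot, uint64_t sync_seq)
{
    *fence_slot = (uint32_t)sync_seq;
}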
1040 struct radeon_device *rdev = dev->dev_private;
1044 if (!rdev->fence_drv[i].initialized)
1047 radeon_fence_process(rdev, i);
1051 (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
1053 rdev->fence_drv[i].sync_seq[i]);
1056 if (i != j && rdev->fence_drv[j].initialized)
1058 j, rdev->fence_drv[i].sync_seq[j]);
1073 struct radeon_device *rdev = dev->dev_private;
1075 down_read(&rdev->exclusive_lock);
1076 seq_printf(m, "%d\n", rdev->needs_reset);
1077 rdev->needs_reset = true;
1078 wake_up_all(&rdev->fence_queue);
1079 up_read(&rdev->exclusive_lock);
1090 int radeon_debugfs_fence_init(struct radeon_device *rdev)
1093 return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
1133 struct radeon_device *rdev = rfence->rdev;
1135 BUG_ON(!spin_is_locked(&rdev->fence_lock));
1136 DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue, &rdev->fence_lock);
1143 struct radeon_device *rdev = fence->rdev;
1150 spin_lock(&rdev->fence_lock);
1152 DRM_SPIN_TIMED_WAIT_UNTIL(r, &rdev->fence_queue,
1153 &rdev->fence_lock, t,
1156 DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(r, &rdev->fence_queue,
1157 &rdev->fence_lock, t,
1160 spin_unlock(&rdev->fence_lock);