/*	$NetBSD: amdgpu_fence.c,v 1.3.10.1 2020/02/29 20:20:13 ad Exp $	*/

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_fence.c,v 1.3.10.1 2020/02/29 20:20:13 ad Exp $");

#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#include <linux/nbsd-namespace.h>

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
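
/*
 * Illustrative usage sketch (not part of this file): a caller that
 * already holds the ring emission lock emits a fence after its
 * commands and later drops its reference.  Everything here other than
 * amdgpu_fence_emit() and fence_put() is assumed for the example.
 *
 *	struct amdgpu_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, owner, &fence);
 *	if (r == 0) {
 *		... submit the work that this fence orders ...
 *		fence_put(&fence->base);
 *	}
 */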

static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
#ifdef __NetBSD__
		&ring->fence_drv.fence_lock,
#else
		&ring->fence_drv.fence_queue.lock,
#endif
		adev->fence_context + ring->idx,
		(*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	BUG_ON(!spin_is_locked(&ring->fence_drv.fence_lock));

	/* Note there is a scenario here that could lead to an infinite
	 * loop, but it is very unlikely to happen.  For it to happen,
	 * the current polling process would need to be interrupted by
	 * another process, and that other process would need to update
	 * last_seq between the atomic read and the xchg of the current
	 * process.
	 *
	 * Moreover, for this to loop forever there would need to be a
	 * continuous stream of newly signaled fences, i.e.
	 * amdgpu_fence_read would need to return a different value on
	 * each iteration, for both the polling process and the other
	 * process that updates last_seq between our atomic read and
	 * xchg.  The value the other process sets as last_seq must also
	 * be higher than the seq value we just read, which means the
	 * current process must be interrupted after amdgpu_fence_read
	 * and before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail out after 10 iterations, accepting the fact that we
	 * might have temporarily set last_seq not to the true last
	 * signaled seq but to an older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking whether a fence is signaled, as it means
		 * that the seq we just read is different from the
		 * previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with
			 * the fact that we might have set an older
			 * fence seq than the real last seq signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_fallback(ring);

	return wake;
}
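
/*
 * Worked example of the 32->64 bit extension performed above
 * (illustrative numbers only): the hardware writes only a 32-bit seq,
 * and the upper 32 bits are first inherited from last_seq.  With
 * last_seq = 0x00000002fffffff0 and a hardware read of 0x00000005,
 * the candidate 0x0000000200000005 is below last_seq, so a 32-bit
 * wraparound is assumed and the upper bits are taken from
 * last_emitted instead; with last_emitted = 0x0000000300000010 this
 * yields seq = 0x0000000300000005.
 */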

#ifdef __NetBSD__
static int amdgpu_fence_check_signaled(struct amdgpu_fence *);

static void
amdgpu_fence_wakeup_locked(struct amdgpu_ring *ring)
{
	struct amdgpu_fence *fence, *next;

	BUG_ON(!spin_is_locked(&ring->fence_drv.fence_lock));
	DRM_SPIN_WAKEUP_ALL(&ring->fence_drv.fence_queue,
	    &ring->fence_drv.fence_lock);
	TAILQ_FOREACH_SAFE(fence, &ring->fence_drv.fence_check, fence_check,
	    next) {
		amdgpu_fence_check_signaled(fence);
	}
}
#endif

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
static void amdgpu_fence_process_locked(struct amdgpu_ring *ring)
{
	if (amdgpu_fence_activity(ring))
#ifdef __NetBSD__
		amdgpu_fence_wakeup_locked(ring);
#else
		wake_up_all(&ring->fence_drv.fence_queue);
#endif
}

void amdgpu_fence_process(struct amdgpu_ring *ring)
{

	spin_lock(&ring->fence_drv.fence_lock);
	amdgpu_fence_process_locked(ring);
	spin_unlock(&ring->fence_drv.fence_lock);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring, cast to unsigned long by the timer code
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}

/*
 * amdgpu_fence_ring_wait_seq - wait for the seq of a specific ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 *
 * return value:
 * 0: seq signaled, and gpu not hung
 * -EDEADLK: GPU hang detected
 * -EINVAL: some parameter is not valid
 */
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
	bool signaled = false;

	BUG_ON(!ring);
	BUG_ON(!spin_is_locked(&ring->fence_drv.fence_lock));
	if (seq > ring->fence_drv.sync_seq[ring->idx])
		return -EINVAL;

	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return 0;

	amdgpu_fence_schedule_fallback(ring);
#ifdef __NetBSD__
	/* XXX How is this ever supposed to wake up in the EDEADLK case?  */
	int r __unused;
	DRM_SPIN_WAIT_NOINTR_UNTIL(r, &ring->fence_drv.fence_queue,
	    &ring->fence_drv.fence_lock,
	    (signaled = amdgpu_fence_seq_signaled(ring, seq)));
#else
	wait_event(ring->fence_drv.fence_queue, (
		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));
#endif

	if (signaled)
		return 0;
	else
		return -EDEADLK;
}
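
/*
 * Illustrative caller pattern (a sketch, not code from this driver):
 * the BUG_ON above requires fence_lock to be held across the wait on
 * NetBSD, and a -EDEADLK return is the cue for hang recovery.
 *
 *	spin_lock(&ring->fence_drv.fence_lock);
 *	r = amdgpu_fence_ring_wait_seq(ring, seq);
 *	spin_unlock(&ring->fence_drv.fence_lock);
 *	if (r == -EDEADLK)
 *		... suspect a GPU hang and schedule recovery ...
 */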

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring to wait on
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

	if (seq >= ring->fence_drv.sync_seq[ring->idx])
		return -ENOENT;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}


/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];

	if (!seq)
		return 0;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count
	 * here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid a 32-bit wraparound */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}

/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}

/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016"PRIx64", "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
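
/*
 * Example of the writeback address math above (illustrative values
 * only): each writeback slot is a 32-bit word, so with
 * adev->wb.gpu_addr = 0x100000 and ring->fence_offs = 8 the fence
 * lives at GPU address 0x100000 + 8 * 4 = 0x100020, and the matching
 * CPU pointer is &adev->wb.wb[8].
 */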

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i, r;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

#ifdef __NetBSD__
	spin_lock_init(&ring->fence_drv.fence_lock);
	DRM_INIT_WAITQUEUE(&ring->fence_drv.fence_queue, "amdfence");
	TAILQ_INIT(&ring->fence_drv.fence_check);
#else
	init_waitqueue_head(&ring->fence_drv.fence_queue);
#endif

	if (amdgpu_enable_scheduler) {
		long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
		if (timeout == 0) {
			/*
			 * FIXME:
			 * A delayed workqueue cannot use
			 * MAX_SCHEDULE_TIMEOUT directly, so the
			 * scheduler will not use a delayed workqueue
			 * if MAX_SCHEDULE_TIMEOUT is set.
			 * Currently keep it simple and silly.
			 */
			timeout = MAX_SCHEDULE_TIMEOUT;
		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   amdgpu_sched_hw_submission,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
		amdgpu_fence_slab = kmem_cache_create(
			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!amdgpu_fence_slab)
			return -ENOMEM;
	}
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
		kmem_cache_destroy(amdgpu_fence_slab);
	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
#ifdef __NetBSD__
		spin_lock(&ring->fence_drv.fence_lock);
		amdgpu_fence_wakeup_locked(ring);
		spin_unlock(&ring->fence_drv.fence_lock);
#else
		wake_up_all(&ring->fence_drv.fence_queue);
#endif
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		ring->fence_drv.initialized = false;
#ifdef __NetBSD__
		BUG_ON(!TAILQ_EMPTY(&ring->fence_drv.fence_check));
		DRM_DESTROY_WAITQUEUE(&ring->fence_drv.fence_queue);
		spin_lock_destroy(&ring->fence_drv.fence_lock);
#endif
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_is_signaled - test if fence is signaled
 *
 * @f: fence to test
 *
 * Test whether the fence sequence number has already signaled.  If it
 * hasn't, start fence processing.  Returns true if the fence is signaled.
 */
static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	amdgpu_fence_process(ring);

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	return false;
}

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so the non-locking variants are
 * used for fence_signal and remove_wait_queue.
 */
#ifdef __NetBSD__
static int amdgpu_fence_check_signaled(struct amdgpu_fence *fence)
#else
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
#endif
{
#ifndef __NetBSD__
	struct amdgpu_fence *fence;
#endif
	u64 seq;
	int ret;

#ifndef __NetBSD__
	fence = container_of(wait, struct amdgpu_fence, fence_wake);
#endif
	BUG_ON(!spin_is_locked(&fence->ring->fence_drv.fence_lock));

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

#ifdef __NetBSD__
		TAILQ_REMOVE(&fence->ring->fence_drv.fence_check, fence,
		    fence_check);
#else
		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
#endif
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * amdgpu_fence_enable_signaling - enable signaling on fence
 * @fence: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

#ifdef __NetBSD__
	TAILQ_INSERT_TAIL(&ring->fence_drv.fence_check, fence, fence_check);
#else
	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
#endif
	fence_get(f);
	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}

static void amdgpu_fence_release(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%016"PRIx64"\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];
			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016"PRIx64"\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}