/*	$NetBSD: amdgpu_ib.c,v 1.3 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ib.c,v 1.3 2021/12/18 23:44:58 riastradh Exp $");

#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
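/*
 * Illustrative sketch only (not compiled): the typical lifecycle of a
 * kernel-owned IB built from the helpers in this file.  The function name
 * and the single NOP packet are assumptions for the example; the helper
 * signatures are the ones defined below.
 */
#if 0
static int example_submit_nop(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	int r;

	/* Carve a 256-byte IB out of the suballocator pool. */
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r)
		return r;

	/* Fill the IB with packets through its CPU mapping. */
	ib.ptr[0] = ring->funcs->nop;
	ib.length_dw = 1;

	/* Ring tests pass a NULL job; real submissions supply one. */
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);

	/* Freeing with the fence defers reuse until the GPU is done. */
	amdgpu_ib_free(adev, &ib, f);
	dma_fence_put(f);
	return r;
}
#endif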
/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer (NULL when the IB is not tied to a VM)
 * @size: requested IB size in bytes
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence the suballocation must wait on before the IB memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
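/*
 * Hypothetical fragment (not compiled) of the two-IB CE/DE submission
 * described in the comment below: the CE preamble IB is tagged with
 * AMDGPU_IB_FLAG_PREAMBLE and scheduled ahead of the DE IB in a single
 * call.  The IB contents, ring, and job are assumed to exist already.
 */
#if 0
	struct amdgpu_ib ibs[2];	/* ibs[0]: CE CONST_IB, ibs[1]: DE IB */
	struct dma_fence *fence = NULL;

	ibs[0].flags |= AMDGPU_IB_FLAG_PREAMBLE;
	r = amdgpu_ib_schedule(ring, 2, ibs, job, &fence);
#endif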
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, or NULL for a ring test
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will already be in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;

	unsigned i;
	int r = 0;
	bool need_pipe_sync = false;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence ?
			job->base.s_fence->scheduled.context : 0;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->sched.ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;

		if (tmp)
			trace_amdgpu_ib_pipe_sync(job, tmp);

		dma_fence_put(tmp);
	}

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
	{
		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
		else
			amdgpu_asic_flush_hdp(adev, ring);
	}

	if (need_ctx_switch)
		status |= AMDGPU_HAVE_CTX_SWITCH;

	skip_preamble = ring->current_ctx == fence_ctx;
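	/*
	 * The status word collects AMDGPU_HAVE_CTX_SWITCH together with
	 * the job's preamble/preemption bits; it is passed to
	 * emit_cntxcntl() and to each emit_ib(), and the context-switch
	 * bit is cleared after each IB so only the first one carries it.
	 */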
	if (job && ring->funcs->emit_cntxcntl) {
		status |= job->preamble_status;
		status |= job->preemption_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
		    skip_preamble &&
		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
		    !amdgpu_mcbp &&
		    !amdgpu_sriov_vf(adev))	/* for SR-IOV preemption, the preamble CE IB must be inserted anyway */
			continue;

		amdgpu_ring_emit_ib(ring, job, ib, status);
		status &= ~AMDGPU_HAVE_CTX_SWITCH;
	}

	if (ring->funcs->emit_tmz)
		amdgpu_ring_emit_tmz(ring, false);

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
		amdgpu_asic_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	r = amdgpu_fence_emit(ring, f, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);
	return 0;
}
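/*
 * Illustrative sketch only (not compiled): how the pool initialized below
 * fits into the device lifecycle.  The function name and call sites are
 * assumptions for the example, not taken from this file.
 */
#if 0
static int example_device_init_ibs(struct amdgpu_device *adev)
{
	int r;

	/* Create the suballocator pool that all IBs are carved from. */
	r = amdgpu_ib_pool_init(adev);
	if (r)
		return r;

	/* ... rings brought up, IBs obtained via amdgpu_ib_get() ... */

	/* Smoke-test each ring with an IB before declaring success. */
	return amdgpu_ib_ring_tests(adev);
}
#endif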
/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready) {
		return 0;
	}
	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;
	long tmo_gfx, tmo_mm;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine could still be running on another VF.
		 * The IB test timeout for MM engines under SR-IOV is
		 * therefore set to a long time; 8 seconds should be enough
		 * for the MM engine to come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout needs to be wide enough to cover the time spent
		 * waiting for them to come back under RUNTIME only.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	} else if (adev->gmc.xgmi.hive_id) {
		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
	}
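	/* Run the IB test on every ring with the timeout selected above;
	 * rings that are not ready or have no IB test (e.g. KIQ) are
	 * skipped.
	 */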
	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		/* KIQ rings don't have an IB test because we never submit
		 * IBs to them and they have no interrupt support.
		 */
		if (!ring->sched.ready || !ring->funcs->test_ib)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
				      ring->name);
			continue;
		}

		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			      ring->name, r);

		if (ring == &adev->gfx.gfx_ring[0]) {
			/* oh, oh, that's really bad */
			adev->accel_working = false;
			return r;

		} else {
			ret = r;
		}
	}
	return ret;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}
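/*
 * With CONFIG_DEBUG_FS enabled, the "amdgpu_sa_info" file registered above
 * dumps the state of the IB suballocator pool; on Linux it would typically
 * appear as /sys/kernel/debug/dri/<minor>/amdgpu_sa_info (path assumed,
 * not taken from this file).
 */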