/*	$NetBSD: amdgpu_gart.c,v 1.5 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_gart.c,v 1.5 2021/12/18 23:44:58 riastradh Exp $");

#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
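
/*
 * Illustration only (not driver code): the index arithmetic used
 * throughout this file derives GPU page-table indices from a byte
 * offset into the aperture.  A minimal sketch, assuming 4 KiB CPU
 * pages and 4 KiB GPU pages, so AMDGPU_GPU_PAGES_IN_CPU_PAGE == 1:
 */
#if 0	/* example only, never compiled */
	uint64_t offset = 64 * 1024;			/* byte offset into aperture */
	unsigned t = offset / AMDGPU_GPU_PAGE_SIZE;	/* GPU pte index: 16 */
	unsigned p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;	/* CPU page index: 16 */
#endif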

/*
 * Common GART table functions.
 */

/**
 * amdgpu_gart_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
#ifdef __NetBSD__
	int rsegs;
	int error;

	/* XXX Can this be called more than once??  */
	if (adev->dummy_page_map != NULL)
		return 0;

	error = bus_dmamem_alloc(adev->ddev->dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &adev->dummy_page_seg, 1, &rsegs, BUS_DMA_WAITOK);
	if (error)
		goto fail0;
	KASSERT(rsegs == 1);
	error = bus_dmamap_create(adev->ddev->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &adev->dummy_page_map);
	if (error)
		goto fail1;
	error = bus_dmamap_load_raw(adev->ddev->dmat, adev->dummy_page_map,
	    &adev->dummy_page_seg, 1, PAGE_SIZE, BUS_DMA_WAITOK);
	if (error)
		goto fail2;

	/* Success!  */
	adev->dummy_page_addr = adev->dummy_page_map->dm_segs[0].ds_addr;
	return 0;

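	/*
	 * Error unwind, in reverse order of setup.  Nothing jumps to
	 * fail3 yet -- hence the __unused annotation -- but it keeps
	 * the unwind chain complete if another setup step is added.
	 */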
fail3: __unused
	bus_dmamap_unload(adev->ddev->dmat, adev->dummy_page_map);
fail2:	bus_dmamap_destroy(adev->ddev->dmat, adev->dummy_page_map);
fail1:	bus_dmamem_free(adev->ddev->dmat, &adev->dummy_page_seg, 1);
fail0:	KASSERT(error);
	adev->dummy_page_map = NULL;
	adev->dummy_page_addr = 0; /* paranoia */
	/* XXX errno NetBSD->Linux */
	return -error;
#else  /* __NetBSD__ */
	struct page *dummy_page = ttm_bo_glob.dummy_read_page;

	if (adev->dummy_page_addr)
		return 0;
	adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		adev->dummy_page_addr = 0;
		return -ENOMEM;
	}
	return 0;
#endif	/* __NetBSD__ */
}

/**
 * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
	if (!adev->dummy_page_addr)
		return;
#ifdef __NetBSD__
	bus_dmamap_unload(adev->ddev->dmat, adev->dummy_page_map);
	bus_dmamap_destroy(adev->ddev->dmat, adev->dummy_page_map);
	bus_dmamem_free(adev->ddev->dmat, &adev->dummy_page_seg, 1);
	adev->dummy_page_map = NULL;
#else
	pci_unmap_page(adev->pdev, adev->dummy_page_addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
#endif
	adev->dummy_page_addr = 0;
}

/**
 * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		struct amdgpu_bo_param bp;

		memset(&bp, 0, sizeof(bp));
		bp.size = adev->gart.table_size;
		bp.byte_align = PAGE_SIZE;
		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
		bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		bp.type = ttm_bo_type_kernel;
		bp.resv = NULL;
		r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * amdgpu_gart_table_vram_pin - pin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gart.bo, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r) {
		amdgpu_bo_unreserve(adev->gart.bo);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr);
	if (r)
		amdgpu_bo_unpin(adev->gart.bo);
	amdgpu_bo_unreserve(adev->gart.bo);
	return r;
}

/**
 * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->gart.bo, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->gart.bo);
		amdgpu_bo_unpin(adev->gart.bo);
		amdgpu_bo_unreserve(adev->gart.bo);
		adev->gart.ptr = NULL;
	}
}

/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
	if (adev->gart.bo == NULL) {
		return;
	}
	amdgpu_bo_unref(&adev->gart.bo);
}
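
/*
 * Illustration only (not driver code): a minimal sketch of how a gmc
 * backend typically drives the GART table object, assuming errors are
 * simply propagated to the caller:
 */
#if 0	/* example only, never compiled */
	r = amdgpu_gart_table_vram_alloc(adev);	/* create the VRAM BO */
	if (r)
		return r;
	r = amdgpu_gart_table_vram_pin(adev);	/* pin + map adev->gart.ptr */
	if (r)
		return r;
	/* ... write GART entries through adev->gart.ptr ... */
	amdgpu_gart_table_vram_unpin(adev);	/* on teardown */
	amdgpu_gart_table_vram_free(adev);
#endif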

#ifdef __NetBSD__
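/*
 * amdgpu_gart_pre_update/amdgpu_gart_post_update bracket CPU writes
 * to the GART table when it lives in DMA-able system memory rather
 * than VRAM: POSTWRITE completes the previous ownership transfer so
 * the CPU may write the entries, PREWRITE then flushes those writes
 * toward the device before the TLB flush makes them visible.
 */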
static void
amdgpu_gart_pre_update(struct amdgpu_device *adev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{

	if (adev->gart.ag_table_map != NULL) {
		const unsigned entsize =
		    adev->gart.table_size / adev->gart.num_gpu_pages;

		bus_dmamap_sync(adev->ddev->dmat, adev->gart.ag_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_POSTWRITE);
	}
}

static void
amdgpu_gart_post_update(struct amdgpu_device *adev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{
	unsigned i;

	if (adev->gart.ag_table_map != NULL) {
		const unsigned entsize =
		    adev->gart.table_size / adev->gart.num_gpu_pages;

		bus_dmamap_sync(adev->ddev->dmat, adev->gart.ag_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_PREWRITE);
	}
	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}
#endif

/*
 * Common gart functions.
 */
#ifdef __NetBSD__
void
amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t gpu_start,
    unsigned npages)
{
	const unsigned gpu_per_cpu = (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const uint64_t gpu_pgstart = (gpu_start / AMDGPU_GPU_PAGE_SIZE);
	const uint64_t pgstart = (gpu_pgstart / gpu_per_cpu);
	uint64_t pgno, gpu_pgno;
	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
	uint64_t flags = 0;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages <= adev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= adev->gart.num_gpu_pages);

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}

	amdgpu_gart_pre_update(adev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
		if (adev->gart.pages[pgstart + pgno] == NULL)
			continue;
		adev->gart.pages[pgstart + pgno] = NULL;
		adev->gart.pages_addr[pgstart + pgno] = adev->dummy_page_addr;

		if (adev->gart.ptr == NULL)
			continue;
		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
			    gpu_pgstart + gpu_per_cpu*pgno + gpu_pgno,
			    adev->dummy_page_addr, flags);
		}
	}
	amdgpu_gart_post_update(adev, gpu_pgstart, gpu_npages);
}
#else  /* __NetBSD__ */
/**
 * amdgpu_gart_unbind - unbind pages from the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;
	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
	uint64_t flags = 0;

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[p] = NULL;
#endif
		page_base = adev->dummy_page_addr;
		if (!adev->gart.ptr)
			continue;

		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
					       t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);

	return 0;
}

/**
 * amdgpu_gart_map - map dma_addresses into GART entries
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 * @dst: CPU address of the gart table
 *
 * Map the dma_addresses into GART entries (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
		    int pages, dma_addr_t *dma_addr, uint64_t flags,
		    void *dst)
{
	uint64_t page_base;
	unsigned i, j, t;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < pages; i++) {
		page_base = dma_addr[i];
		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	return 0;
}
#endif	/* __NetBSD__ */

#ifdef __NetBSD__
int
amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t gpu_start,
    unsigned npages, struct page **pages, bus_dmamap_t dmamap, uint32_t flags)
{
	const unsigned gpu_per_cpu = (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const uint64_t gpu_pgstart = (gpu_start / AMDGPU_GPU_PAGE_SIZE);
	const uint64_t pgstart = (gpu_pgstart / gpu_per_cpu);
	uint64_t pgno, gpu_pgno;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages == dmamap->dm_nsegs);
	KASSERT(npages <= adev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= adev->gart.num_gpu_pages);

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	amdgpu_gart_pre_update(adev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
		const bus_addr_t addr = dmamap->dm_segs[pgno].ds_addr;

		KASSERT(dmamap->dm_segs[pgno].ds_len == PAGE_SIZE);
		adev->gart.pages[pgstart + pgno] = pages[pgno];
		adev->gart.pages_addr[pgstart + pgno] = addr;

		if (adev->gart.ptr == NULL)
			continue;

		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
			    gpu_pgstart + gpu_per_cpu*pgno + gpu_pgno,
			    addr + gpu_pgno*AMDGPU_GPU_PAGE_SIZE, flags);
		}
	}
	amdgpu_gart_post_update(adev, gpu_pgstart, gpu_npages);

	return 0;
}
#else  /* __NetBSD__ */
/**
 * amdgpu_gart_bind - bind pages into the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
		     uint64_t flags)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	unsigned t, p;
#endif
	int r, i;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++)
		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif

	if (!adev->gart.ptr)
		return 0;

	r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
		    adev->gart.ptr);
	if (r)
		return r;

	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
	return 0;
}
#endif
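
/*
 * Illustration only (not driver code): a minimal sketch of the Linux
 * bind/unbind pairing, assuming the caller (e.g. a ttm backend) holds
 * a dma_addr array covering num_pages CPU pages at byte offset
 * `offset' within the aperture:
 */
#if 0	/* example only, never compiled */
	r = amdgpu_gart_bind(adev, offset, num_pages, pagelist, dma_addr,
	    AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_READABLE |
	    AMDGPU_PTE_WRITEABLE);
	if (r)
		return r;
	/* ... GPU may access the pages through the aperture ... */
	amdgpu_gart_unbind(adev, offset, num_pages);
#endif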

/**
 * amdgpu_gart_init - init the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->dummy_page_addr)
		return 0;

	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = amdgpu_gart_dummy_page_init(adev);
	if (r)
		return r;
	/* Compute table size */
	adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
	adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* Allocate pages table */
	adev->gart.pages = vzalloc(array_size(sizeof(void *),
					      adev->gart.num_cpu_pages));
	if (adev->gart.pages == NULL)
		return -ENOMEM;
#endif

	return 0;
}
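
/*
 * Illustration only: the sizing above, worked for a hypothetical
 * 256 MiB GART with 4 KiB CPU and GPU pages:
 *
 *	num_cpu_pages = 256 MiB / 4 KiB = 65536
 *	num_gpu_pages = 256 MiB / 4 KiB = 65536
 *
 * With 8-byte entries (the usual gmc layout, assumed here) that
 * implies a 512 KiB page table, allocated in VRAM by
 * amdgpu_gart_table_vram_alloc() above.
 */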

/**
 * amdgpu_gart_fini - tear down the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	vfree(adev->gart.pages);
	adev->gart.pages = NULL;
#endif
	amdgpu_gart_dummy_page_fini(adev);
}
    569