/*	$NetBSD: amdgpu_gart.c,v 1.6 2021/12/19 12:02:39 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_gart.c,v 1.6 2021/12/19 12:02:39 riastradh Exp $");

#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
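
/*
 * Illustrative sketch, not driver code: how an address inside the GART
 * aperture resolves to a page-table entry index.  Each entry covers one
 * AMDGPU_GPU_PAGE_SIZE (4 KiB) GPU page; the hypothetical aperture_base
 * parameter stands in for adev->gmc.gart_start.
 */
#if 0
static uint64_t
example_gart_pte_index(uint64_t gpu_addr, uint64_t aperture_base)
{

	/* The offset into the aperture, in GPU pages, is the PTE index. */
	return (gpu_addr - aperture_base) / AMDGPU_GPU_PAGE_SIZE;
}
#endif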

/*
 * Common GART table functions.
 */

/**
 * amdgpu_gart_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
#ifdef __NetBSD__
	int rsegs;
	int error;

	/* XXX Can this be called more than once??  */
	if (adev->dummy_page_map != NULL)
		return 0;

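	/*
	 * NetBSD bus_dma pattern: allocate one page of DMA-safe memory,
	 * create a map for it, then load the segment into the map to
	 * obtain the bus address that will back unbound GART entries.
	 */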
	error = bus_dmamem_alloc(adev->ddev->dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &adev->dummy_page_seg, 1, &rsegs, BUS_DMA_WAITOK);
	if (error)
		goto fail0;
	KASSERT(rsegs == 1);
	error = bus_dmamap_create(adev->ddev->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &adev->dummy_page_map);
	if (error)
		goto fail1;
	error = bus_dmamap_load_raw(adev->ddev->dmat, adev->dummy_page_map,
	    &adev->dummy_page_seg, 1, PAGE_SIZE, BUS_DMA_WAITOK);
	if (error)
		goto fail2;

	/* Success!  */
	adev->dummy_page_addr = adev->dummy_page_map->dm_segs[0].ds_addr;
	return 0;

fail3: __unused
	bus_dmamap_unload(adev->ddev->dmat, adev->dummy_page_map);
fail2:	bus_dmamap_destroy(adev->ddev->dmat, adev->dummy_page_map);
fail1:	bus_dmamem_free(adev->ddev->dmat, &adev->dummy_page_seg, 1);
fail0:	KASSERT(error);
	adev->dummy_page_map = NULL;
	adev->dummy_page_addr = 0; /* paranoia */
	/* XXX errno NetBSD->Linux */
	return -error;
#else  /* __NetBSD__ */
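	/*
	 * On Linux, reuse TTM's global dummy read page and DMA-map it
	 * bidirectionally; every unbound GART entry points at this one
	 * mapping.
	 */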
	struct page *dummy_page = ttm_bo_glob.dummy_read_page;

	if (adev->dummy_page_addr)
		return 0;
	adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		adev->dummy_page_addr = 0;
		return -ENOMEM;
	}
	return 0;
#endif	/* __NetBSD__ */
}

/**
 * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
	if (!adev->dummy_page_addr)
		return;
#ifdef __NetBSD__
	bus_dmamap_unload(adev->ddev->dmat, adev->dummy_page_map);
	bus_dmamap_destroy(adev->ddev->dmat, adev->dummy_page_map);
	bus_dmamem_free(adev->ddev->dmat, &adev->dummy_page_seg, 1);
	adev->dummy_page_map = NULL;
#else
	pci_unmap_page(adev->pdev, adev->dummy_page_addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
#endif
	adev->dummy_page_addr = 0;
}

/**
 * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		struct amdgpu_bo_param bp;

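		/*
		 * The GMC reads the table straight out of VRAM, so the
		 * buffer must be physically contiguous and CPU-accessible
		 * for PTE updates.
		 */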
		memset(&bp, 0, sizeof(bp));
		bp.size = adev->gart.table_size;
		bp.byte_align = PAGE_SIZE;
		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
		bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		bp.type = ttm_bo_type_kernel;
		bp.resv = NULL;
		r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * amdgpu_gart_table_vram_pin - pin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
	int r;

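	/* Reserve, pin in VRAM, then kmap so the CPU can write PTEs. */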
	r = amdgpu_bo_reserve(adev->gart.bo, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r) {
		amdgpu_bo_unreserve(adev->gart.bo);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr);
	if (r)
		amdgpu_bo_unpin(adev->gart.bo);
	amdgpu_bo_unreserve(adev->gart.bo);
	return r;
}

/**
 * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->gart.bo, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->gart.bo);
		amdgpu_bo_unpin(adev->gart.bo);
		amdgpu_bo_unreserve(adev->gart.bo);
		adev->gart.ptr = NULL;
	}
}

/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
	if (adev->gart.bo == NULL) {
		return;
	}
	amdgpu_bo_unref(&adev->gart.bo);
}

#ifdef __NetBSD__
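/*
 * CPU updates of the GART table are bracketed by bus_dmamap_sync
 * calls: POSTWRITE before the update closes out the previous write
 * epoch, and PREWRITE afterward flushes the new entries toward the
 * device, followed by an HDP flush and a TLB flush on each VM hub.
 */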
static void
amdgpu_gart_pre_update(struct amdgpu_device *adev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{

	if (adev->gart.ag_table_map != NULL) {
		const unsigned entsize =
		    adev->gart.table_size / adev->gart.num_gpu_pages;

		bus_dmamap_sync(adev->ddev->dmat, adev->gart.ag_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_POSTWRITE);
	}
}

static void
amdgpu_gart_post_update(struct amdgpu_device *adev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{
	unsigned i;

	if (adev->gart.ag_table_map != NULL) {
		const unsigned entsize =
		    adev->gart.table_size / adev->gart.num_gpu_pages;

		bus_dmamap_sync(adev->ddev->dmat, adev->gart.ag_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_PREWRITE);
	}
	mb();			/* XXX why is bus_dmamap_sync not enough? */
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}
#endif

/*
 * Common gart functions.
 */
#ifdef __NetBSD__
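/*
 * Index arithmetic, by example: with 8 KiB CPU pages and 4 KiB GPU
 * pages, AMDGPU_GPU_PAGES_IN_CPU_PAGE is 2, so CPU page pgno covers
 * the two GPU PTEs at gpu_pgstart + 2*pgno and gpu_pgstart + 2*pgno + 1.
 */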
void
amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t gpu_start,
    unsigned npages)
{
	const unsigned gpu_per_cpu = AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const uint64_t gpu_pgstart = (gpu_start / AMDGPU_GPU_PAGE_SIZE);
	const uint64_t pgstart = (gpu_pgstart / gpu_per_cpu);
	uint64_t pgno, gpu_pgno;
	uint32_t flags = AMDGPU_PTE_SYSTEM;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages <= adev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= adev->gart.num_gpu_pages);

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}

	amdgpu_gart_pre_update(adev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[pgstart + pgno] = NULL;
#endif

		if (adev->gart.ptr == NULL)
			continue;
		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
			    gpu_pgstart + gpu_per_cpu*pgno + gpu_pgno,
			    adev->dummy_page_addr, flags);
		}
	}
	amdgpu_gart_post_update(adev, gpu_pgstart, gpu_npages);
}
#else  /* __NetBSD__ */
/**
 * amdgpu_gart_unbind - unbind pages from the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;
	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
	uint64_t flags = 0;

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[p] = NULL;
#endif
		page_base = adev->dummy_page_addr;
		if (!adev->gart.ptr)
			continue;

		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
					       t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
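	/*
	 * Order the PTE writes, then flush the HDP write cache and the
	 * TLB on every VM hub so the GPU observes the dummy-page entries.
	 */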
	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);

	return 0;
}

/**
 * amdgpu_gart_map - map dma_addresses into GART entries
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 * @dst: CPU address of the gart table
 *
 * Map the dma_addresses into GART entries (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
		    int pages, dma_addr_t *dma_addr, uint64_t flags,
		    void *dst)
{
	uint64_t page_base;
	unsigned i, j, t;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < pages; i++) {
		page_base = dma_addr[i];
		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	return 0;
}
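
/*
 * Illustrative sketch, not driver code: a typical caller maps an array
 * of DMA addresses at a page-aligned aperture offset directly into the
 * CPU-visible table.  The flag combination here is an example only.
 */
#if 0
	r = amdgpu_gart_map(adev, offset, npages, dma_addr,
	    AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE,
	    adev->gart.ptr);
#endif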
#endif	/* __NetBSD__ */

#ifdef __NetBSD__
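/*
 * NetBSD contract: dmamap carries exactly npages PAGE_SIZE segments,
 * one per CPU page; each segment fans out into
 * AMDGPU_GPU_PAGES_IN_CPU_PAGE consecutive GPU PTEs.
 */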
int
amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t gpu_start,
    unsigned npages, struct page **pages, bus_dmamap_t dmamap, uint32_t flags)
{
	const unsigned gpu_per_cpu = AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const uint64_t gpu_pgstart = (gpu_start / AMDGPU_GPU_PAGE_SIZE);
	const uint64_t pgstart = (gpu_pgstart / gpu_per_cpu);
	uint64_t pgno, gpu_pgno;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages == dmamap->dm_nsegs);
	KASSERT(npages <= adev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= adev->gart.num_gpu_pages);

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	amdgpu_gart_pre_update(adev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
		const bus_addr_t addr = dmamap->dm_segs[pgno].ds_addr;

		KASSERT(dmamap->dm_segs[pgno].ds_len == PAGE_SIZE);
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[pgstart + pgno] = NULL;
#endif

		if (adev->gart.ptr == NULL)
			continue;

		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
			    gpu_pgstart + gpu_per_cpu*pgno + gpu_pgno,
			    addr + gpu_pgno*AMDGPU_GPU_PAGE_SIZE, flags);
		}
	}
	amdgpu_gart_post_update(adev, gpu_pgstart, gpu_npages);

	return 0;
}
#else  /* __NetBSD__ */
/**
 * amdgpu_gart_bind - bind pages into the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
		     uint64_t flags)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	unsigned t, p;
#endif
	int r, i;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++)
		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif

	if (!adev->gart.ptr)
		return 0;

	r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
		    adev->gart.ptr);
	if (r)
		return r;

	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
	return 0;
}
#endif

/**
 * amdgpu_gart_init - init the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->dummy_page_addr)
		return 0;

	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = amdgpu_gart_dummy_page_init(adev);
	if (r)
		return r;
	/* Compute table size */
	adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
	adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
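	/*
	 * For example, a 256 MiB GART with 4 KiB pages on both the CPU
	 * and GPU side yields 65536 entries of each kind.
	 */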
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* Allocate pages table */
	adev->gart.pages = vzalloc(array_size(sizeof(void *),
					      adev->gart.num_cpu_pages));
	if (adev->gart.pages == NULL)
		return -ENOMEM;
#endif

	return 0;
}

/**
 * amdgpu_gart_fini - tear down the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	vfree(adev->gart.pages);
	adev->gart.pages = NULL;
#endif
	amdgpu_gart_dummy_page_fini(adev);
}
    572