/* radeon_gart.c, revision 1.2 (NetBSD port of the Linux DRM radeon driver) */
      1 /*
      2  * Copyright 2008 Advanced Micro Devices, Inc.
      3  * Copyright 2008 Red Hat Inc.
      4  * Copyright 2009 Jerome Glisse.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  *
     24  * Authors: Dave Airlie
     25  *          Alex Deucher
     26  *          Jerome Glisse
     27  */
     28 #include <drm/drmP.h>
     29 #include <drm/radeon_drm.h>
     30 #include "radeon.h"
     31 
     32 /*
     33  * GART
     34  * The GART (Graphics Aperture Remapping Table) is an aperture
     35  * in the GPU's address space.  System pages can be mapped into
     36  * the aperture and look like contiguous pages from the GPU's
     37  * perspective.  A page table maps the pages in the aperture
     38  * to the actual backing pages in system memory.
     39  *
     40  * Radeon GPUs support both an internal GART, as described above,
     41  * and AGP.  AGP works similarly, but the GART table is configured
     42  * and maintained by the northbridge rather than the driver.
     43  * Radeon hw has a separate AGP aperture that is programmed to
     44  * point to the AGP aperture provided by the northbridge and the
     45  * requests are passed through to the northbridge aperture.
     46  * Both AGP and internal GART can be used at the same time, however
     47  * that is not currently supported by the driver.
     48  *
     49  * This file handles the common internal GART management.
     50  */
     51 
     52 /*
     53  * Common GART table functions.
     54  */
     55 /**
     56  * radeon_gart_table_ram_alloc - allocate system ram for gart page table
     57  *
     58  * @rdev: radeon_device pointer
     59  *
     60  * Allocate system memory for GART page table
     61  * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
     62  * gart table to be in system memory.
     63  * Returns 0 for success, -ENOMEM for failure.
     64  */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
#ifdef __NetBSD__
	int rsegs;
	int error;

	/*
	 * Allocate one physically contiguous DMA segment for the whole
	 * table, aligned to the CPU page size.
	 */
	error = bus_dmamem_alloc(rdev->ddev->dmat, rdev->gart.table_size,
	    PAGE_SIZE, 0, &rdev->gart.rg_table_seg, 1, &rsegs, BUS_DMA_WAITOK);
	if (error)
		goto fail0;
	KASSERT(rsegs == 1);
	/* One map with a single segment covering the entire table.  */
	error = bus_dmamap_create(rdev->ddev->dmat, rdev->gart.table_size, 1,
	    rdev->gart.table_size, 0, BUS_DMA_WAITOK,
	    &rdev->gart.rg_table_map);
	if (error)
		goto fail1;
	/*
	 * Map the segment into kernel virtual address space, uncached,
	 * so CPU stores to table entries are visible to the GPU without
	 * relying on cache flushes.
	 */
	error = bus_dmamem_map(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1,
	    rdev->gart.table_size, &rdev->gart.ptr,
	    BUS_DMA_WAITOK|BUS_DMA_NOCACHE);
	if (error)
		goto fail2;
	error = bus_dmamap_load(rdev->ddev->dmat, rdev->gart.rg_table_map,
	    rdev->gart.ptr, rdev->gart.table_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto fail3;

	/* Success!  */
	rdev->gart.table_addr = rdev->gart.rg_table_map->dm_segs[0].ds_addr;
	return 0;

	/* Unwind in reverse order of construction; fail4 kept for symmetry. */
fail4: __unused
	bus_dmamap_unload(rdev->ddev->dmat, rdev->gart.rg_table_map);
fail3:	bus_dmamem_unmap(rdev->ddev->dmat, rdev->gart.ptr,
	    rdev->gart.table_size);
fail2:	bus_dmamap_destroy(rdev->ddev->dmat, rdev->gart.rg_table_map);
fail1:	bus_dmamem_free(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1);
fail0:	KASSERT(error);
	/* XXX errno NetBSD->Linux */
	return -error;
#else
	void *ptr;

	/* Coherent DMA allocation; table_addr receives the bus address. */
	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
				   &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	/*
	 * On these IGP chips the GART table must be mapped uncached,
	 * or the GPU may see stale entries.
	 */
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.ptr = ptr;
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
	return 0;
#endif
}
    124 
    125 /**
    126  * radeon_gart_table_ram_free - free system ram for gart page table
    127  *
    128  * @rdev: radeon_device pointer
    129  *
    130  * Free system memory for GART page table
    131  * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
    132  * gart table to be in system memory.
    133  */
    134 void radeon_gart_table_ram_free(struct radeon_device *rdev)
    135 {
    136 	if (rdev->gart.ptr == NULL) {
    137 		return;
    138 	}
    139 #ifdef __NetBSD__
    140 	bus_dmamap_unload(rdev->ddev->dmat, rdev->gart.rg_table_map);
    141 	bus_dmamem_unmap(rdev->ddev->dmat, rdev->gart.ptr,
    142 	    rdev->gart.table_size);
    143 	bus_dmamap_destroy(rdev->ddev->dmat, rdev->gart.rg_table_map);
    144 	bus_dmamem_free(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1);
    145 #else
    146 #ifdef CONFIG_X86
    147 	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
    148 	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
    149 		set_memory_wb((unsigned long)rdev->gart.ptr,
    150 			      rdev->gart.table_size >> PAGE_SHIFT);
    151 	}
    152 #endif
    153 	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
    154 			    (void *)rdev->gart.ptr,
    155 			    rdev->gart.table_addr);
    156 	rdev->gart.ptr = NULL;
    157 	rdev->gart.table_addr = 0;
    158 #endif
    159 }
    160 
    161 /**
    162  * radeon_gart_table_vram_alloc - allocate vram for gart page table
    163  *
    164  * @rdev: radeon_device pointer
    165  *
    166  * Allocate video memory for GART page table
    167  * (pcie r4xx, r5xx+).  These asics require the
    168  * gart table to be in video memory.
    169  * Returns 0 for success, error for failure.
    170  */
    171 int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
    172 {
    173 	int r;
    174 
    175 	if (rdev->gart.robj == NULL) {
    176 		r = radeon_bo_create(rdev, rdev->gart.table_size,
    177 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
    178 				     NULL, &rdev->gart.robj);
    179 		if (r) {
    180 			return r;
    181 		}
    182 	}
    183 	return 0;
    184 }
    185 
    186 /**
    187  * radeon_gart_table_vram_pin - pin gart page table in vram
    188  *
    189  * @rdev: radeon_device pointer
    190  *
    191  * Pin the GART page table in vram so it will not be moved
    192  * by the memory manager (pcie r4xx, r5xx+).  These asics require the
    193  * gart table to be in video memory.
    194  * Returns 0 for success, error for failure.
    195  */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	/* Must hold the BO reservation across pin + kmap. */
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	/* Pin in VRAM; gpu_addr receives the table's GPU address. */
	r = radeon_bo_pin(rdev->gart.robj,
				RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	/* Map the table for CPU writes of page-table entries. */
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	/*
	 * NOTE(review): table_addr is assigned even when kmap failed
	 * (the BO was unpinned just above); callers appear to rely on
	 * the nonzero return to ignore it -- confirm before reordering.
	 */
	rdev->gart.table_addr = gpu_addr;
	return r;
}
    217 
    218 /**
    219  * radeon_gart_table_vram_unpin - unpin gart page table in vram
    220  *
    221  * @rdev: radeon_device pointer
    222  *
    223  * Unpin the GART page table in vram (pcie r4xx, r5xx+).
    224  * These asics require the gart table to be in video memory.
    225  */
    226 void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
    227 {
    228 	int r;
    229 
    230 	if (rdev->gart.robj == NULL) {
    231 		return;
    232 	}
    233 	r = radeon_bo_reserve(rdev->gart.robj, false);
    234 	if (likely(r == 0)) {
    235 		radeon_bo_kunmap(rdev->gart.robj);
    236 		radeon_bo_unpin(rdev->gart.robj);
    237 		radeon_bo_unreserve(rdev->gart.robj);
    238 		rdev->gart.ptr = NULL;
    239 	}
    240 }
    241 
    242 /**
    243  * radeon_gart_table_vram_free - free gart page table vram
    244  *
    245  * @rdev: radeon_device pointer
    246  *
    247  * Free the video memory used for the GART page table
    248  * (pcie r4xx, r5xx+).  These asics require the gart table to
    249  * be in video memory.
    250  */
    251 void radeon_gart_table_vram_free(struct radeon_device *rdev)
    252 {
    253 	if (rdev->gart.robj == NULL) {
    254 		return;
    255 	}
    256 	radeon_bo_unref(&rdev->gart.robj);
    257 }
    258 
    259 #ifdef __NetBSD__
    260 static void
    261 radeon_gart_pre_update(struct radeon_device *rdev, unsigned gpu_pgstart,
    262     unsigned gpu_npages)
    263 {
    264 
    265 	if (rdev->gart.rg_table_map != NULL)
    266 		bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
    267 		    gpu_pgstart*4, gpu_npages*4, BUS_DMASYNC_PREWRITE);
    268 }
    269 
/*
 * Called after the CPU has written GART entries [gpu_pgstart,
 * gpu_pgstart + gpu_npages): flush the writes toward the device and
 * invalidate the GPU's TLB so it observes the new translations.
 */
static void
radeon_gart_post_update(struct radeon_device *rdev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{

	membar_sync();		/* XXX overkill */
	/*
	 * PREWRITE: make the CPU's table writes visible before the GPU
	 * (the DMA reader) consumes them.  The *4 scaling assumes each
	 * GART entry is 4 bytes -- matches radeon_gart_pre_update.
	 */
	if (rdev->gart.rg_table_map != NULL)
		bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
		    gpu_pgstart*4, gpu_npages*4, BUS_DMASYNC_PREWRITE);
	radeon_gart_tlb_flush(rdev);
}
    281 #endif
    282 
    283 /*
    284  * Common gart functions.
    285  */
    286 #ifdef __NetBSD__
    287 void
    288 radeon_gart_unbind(struct radeon_device *rdev, unsigned gpu_start,
    289     unsigned npages)
    290 {
    291 	const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
    292 	const unsigned gpu_npages = (npages / gpu_per_cpu);
    293 	const unsigned gpu_pgstart = (gpu_start / RADEON_GPU_PAGE_SIZE);
    294 	const unsigned pgstart = (gpu_pgstart / gpu_per_cpu);
    295 	unsigned pgno, gpu_pgno;
    296 
    297 	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
    298 	KASSERT(npages <= rdev->gart.num_cpu_pages);
    299 	KASSERT(gpu_npages <= rdev->gart.num_cpu_pages);
    300 
    301 	if (!rdev->gart.ready) {
    302 		WARN(1, "trying to bind memory to uninitialized GART !\n");
    303 		return;
    304 	}
    305 
    306 	radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
    307 	for (pgno = 0; pgno < npages; pgno++) {
    308 		if (rdev->gart.pages[pgstart + pgno] == NULL)
    309 			continue;
    310 		rdev->gart.pages[pgstart + pgno] = NULL;
    311 		rdev->gart.pages_addr[pgstart + pgno] = rdev->dummy_page.addr;
    312 		if (rdev->gart.ptr == NULL)
    313 			continue;
    314 		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++)
    315 			radeon_gart_set_page(rdev, gpu_pgstart + gpu_pgno,
    316 			    (rdev->dummy_page.addr +
    317 				gpu_pgno*RADEON_GPU_PAGE_SIZE));
    318 	}
    319 	radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);
    320 }
    321 #else
    322 /**
    323  * radeon_gart_unbind - unbind pages from the gart page table
    324  *
    325  * @rdev: radeon_device pointer
    326  * @offset: offset into the GPU's gart aperture
    327  * @pages: number of pages to unbind
    328  *
    329  * Unbinds the requested pages from the gart page table and
    330  * replaces them with the dummy page (all asics).
    331  */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;	/* GPU-page index into the GART table */
	unsigned p;	/* CPU-page index into gart.pages[] */
	int i, j;
	u64 page_base;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
	/* One CPU page spans PAGE_SIZE/RADEON_GPU_PAGE_SIZE GPU pages. */
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			/* Redirect this CPU page's entries to the dummy page. */
			rdev->gart.pages[p] = NULL;
			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t, page_base);
				}
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	/* Order the table writes before the GPU re-reads translations. */
	mb();
	radeon_gart_tlb_flush(rdev);
}
    362 #endif
    363 
    364 #ifdef __NetBSD__
    365 int
    366 radeon_gart_bind(struct radeon_device *rdev, unsigned gpu_start,
    367     unsigned npages, struct page **pages, bus_dmamap_t dmamap)
    368 {
    369 	const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
    370 	const unsigned gpu_npages = (npages / gpu_per_cpu);
    371 	const unsigned gpu_pgstart = (gpu_start / RADEON_GPU_PAGE_SIZE);
    372 	const unsigned pgstart = (gpu_pgstart / gpu_per_cpu);
    373 	unsigned pgno, gpu_pgno;
    374 
    375 	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
    376 	KASSERT(npages == dmamap->dm_nsegs);
    377 	KASSERT(npages <= rdev->gart.num_cpu_pages);
    378 	KASSERT(gpu_npages <= rdev->gart.num_cpu_pages);
    379 
    380 	if (!rdev->gart.ready) {
    381 		WARN(1, "trying to bind memory to uninitialized GART !\n");
    382 		return -EINVAL;
    383 	}
    384 
    385 	radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
    386 	for (pgno = 0; pgno < npages; pgno++) {
    387 		const bus_addr_t addr = dmamap->dm_segs[pgno].ds_addr;
    388 
    389 		KASSERT(dmamap->dm_segs[pgno].ds_len == PAGE_SIZE);
    390 		rdev->gart.pages[pgstart + pgno] = pages[pgno];
    391 		rdev->gart.pages_addr[pgstart + pgno] = addr;
    392 		if (rdev->gart.ptr == NULL)
    393 			continue;
    394 		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++)
    395 			radeon_gart_set_page(rdev, gpu_pgstart + gpu_pgno,
    396 			    (addr + gpu_pgno*RADEON_GPU_PAGE_SIZE));
    397 	}
    398 	radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);
    399 
    400 	return 0;
    401 }
    402 #else
    403 /**
    404  * radeon_gart_bind - bind pages into the gart page table
    405  *
    406  * @rdev: radeon_device pointer
    407  * @offset: offset into the GPU's gart aperture
    408  * @pages: number of pages to bind
    409  * @pagelist: pages to bind
    410  * @dma_addr: DMA addresses of pages
    411  *
    412  * Binds the requested pages to the gart page table
    413  * (all asics).
    414  * Returns 0 for success, -EINVAL for failure.
    415  */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
	unsigned t;	/* GPU-page index into the GART table */
	unsigned p;	/* CPU-page index into gart.pages[] */
	uint64_t page_base;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}
	/* One CPU page spans PAGE_SIZE/RADEON_GPU_PAGE_SIZE GPU pages. */
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages_addr[p] = dma_addr[i];
		rdev->gart.pages[p] = pagelist[i];
		if (rdev->gart.ptr) {
			/* Write one table entry per GPU page of this CPU page. */
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				radeon_gart_set_page(rdev, t, page_base);
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	/* Order the table writes before the GPU re-reads translations. */
	mb();
	radeon_gart_tlb_flush(rdev);
	return 0;
}
    446 #endif
    447 
    448 /**
    449  * radeon_gart_restore - bind all pages in the gart page table
    450  *
    451  * @rdev: radeon_device pointer
    452  *
    453  * Binds all pages in the gart page table (all asics).
    454  * Used to rebuild the gart table on device startup or resume.
    455  */
    456 void radeon_gart_restore(struct radeon_device *rdev)
    457 {
    458 #ifdef __NetBSD__
    459 	const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
    460 	unsigned pgno, gpu_pgno;
    461 
    462 	if (rdev->gart.ptr == NULL)
    463 		return;
    464 
    465 	radeon_gart_pre_update(rdev, 0, rdev->gart.num_gpu_pages);
    466 	for (pgno = 0; pgno < rdev->gart.num_cpu_pages; pgno++) {
    467 		const bus_addr_t addr = rdev->gart.pages_addr[pgno];
    468 		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++)
    469 			radeon_gart_set_page(rdev, gpu_pgno,
    470 			    (addr + gpu_pgno*RADEON_GPU_PAGE_SIZE));
    471 	}
    472 	radeon_gart_pre_update(rdev, 0, rdev->gart.num_gpu_pages);
    473 #else
    474 	int i, j, t;
    475 	u64 page_base;
    476 
    477 	if (!rdev->gart.ptr) {
    478 		return;
    479 	}
    480 	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
    481 		page_base = rdev->gart.pages_addr[i];
    482 		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
    483 			radeon_gart_set_page(rdev, t, page_base);
    484 			page_base += RADEON_GPU_PAGE_SIZE;
    485 		}
    486 	}
    487 	mb();
    488 	radeon_gart_tlb_flush(rdev);
    489 #endif
    490 }
    491 
    492 /**
    493  * radeon_gart_init - init the driver info for managing the gart
    494  *
    495  * @rdev: radeon_device pointer
    496  *
    497  * Allocate the dummy page and init the gart driver info (all asics).
    498  * Returns 0 for success, error for failure.
    499  */
    500 int radeon_gart_init(struct radeon_device *rdev)
    501 {
    502 	int r, i;
    503 
    504 	if (rdev->gart.pages) {
    505 		return 0;
    506 	}
    507 	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
    508 	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
    509 		DRM_ERROR("Page size is smaller than GPU page size!\n");
    510 		return -EINVAL;
    511 	}
    512 	r = radeon_dummy_page_init(rdev);
    513 	if (r)
    514 		return r;
    515 	/* Compute table size */
    516 	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
    517 	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
    518 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
    519 		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
    520 	/* Allocate pages table */
    521 	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
    522 	if (rdev->gart.pages == NULL) {
    523 		radeon_gart_fini(rdev);
    524 		return -ENOMEM;
    525 	}
    526 	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
    527 					rdev->gart.num_cpu_pages);
    528 	if (rdev->gart.pages_addr == NULL) {
    529 		radeon_gart_fini(rdev);
    530 		return -ENOMEM;
    531 	}
    532 	/* set GART entry to point to the dummy page by default */
    533 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
    534 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
    535 	}
    536 	return 0;
    537 }
    538 
    539 /**
    540  * radeon_gart_fini - tear down the driver info for managing the gart
    541  *
    542  * @rdev: radeon_device pointer
    543  *
    544  * Tear down the gart driver info and free the dummy page (all asics).
    545  */
    546 void radeon_gart_fini(struct radeon_device *rdev)
    547 {
    548 	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
    549 		/* unbind pages */
    550 		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
    551 	}
    552 	rdev->gart.ready = false;
    553 	vfree(rdev->gart.pages);
    554 	vfree(rdev->gart.pages_addr);
    555 	rdev->gart.pages = NULL;
    556 	rdev->gart.pages_addr = NULL;
    557 
    558 	radeon_dummy_page_fini(rdev);
    559 }
    560