/*	$NetBSD: amdgpu_gart.c,v 1.12 2024/07/01 13:27:55 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_gart.c,v 1.12 2024/07/01 13:27:55 riastradh Exp $");

#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
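
/*
 * For illustration (a sketch, not driver code): the table is indexed
 * in GPU pages, so an aperture offset maps to a table entry and a
 * backing CPU page as
 *
 *	t = offset / AMDGPU_GPU_PAGE_SIZE;	(table entry index)
 *	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;	(CPU page index)
 *
 * With 4 KiB GPU pages and 4 KiB CPU pages the two indices coincide;
 * a port with larger CPU pages spreads each CPU page over several
 * consecutive GART entries.
 */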

/*
 * Common GART table functions.
 */

/**
 * amdgpu_gart_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, negative errno on failure.
 */
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
#ifdef __NetBSD__
	int rsegs;
	void *p;
	int error;

	/* XXX Can this be called more than once??  */
	if (adev->dummy_page_map != NULL)
		return 0;
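
	/*
	 * Allocate one physically contiguous page, zero it through a
	 * temporary CPU mapping, and load it into a DMA map so it has
	 * a stable bus address the GPU can be pointed at.
	 */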
	error = bus_dmamem_alloc(adev->ddev->dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &adev->dummy_page_seg, 1, &rsegs, BUS_DMA_WAITOK);
	if (error)
		goto fail0;
	KASSERT(rsegs == 1);
	error = bus_dmamem_map(adev->ddev->dmat, &adev->dummy_page_seg, 1,
	    PAGE_SIZE, &p, BUS_DMA_WAITOK);
	if (error)
		goto fail1;
	memset(p, 0, PAGE_SIZE);
	bus_dmamem_unmap(adev->ddev->dmat, p, PAGE_SIZE);
	error = bus_dmamap_create(adev->ddev->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &adev->dummy_page_map);
	if (error)
		goto fail1;
	error = bus_dmamap_load_raw(adev->ddev->dmat, adev->dummy_page_map,
	    &adev->dummy_page_seg, 1, PAGE_SIZE, BUS_DMA_WAITOK);
	if (error)
		goto fail2;

	bus_dmamap_sync(adev->ddev->dmat, adev->dummy_page_map, 0, PAGE_SIZE,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Success!  */
	adev->dummy_page_addr = adev->dummy_page_map->dm_segs[0].ds_addr;
	return 0;

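	/*
	 * Error unwind, in reverse order of construction: each failN
	 * label undoes the steps that had succeeded when step N
	 * failed.  Nothing jumps to fail3 yet, hence __unused.
	 */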
fail3: __unused
	bus_dmamap_unload(adev->ddev->dmat, adev->dummy_page_map);
fail2:	bus_dmamap_destroy(adev->ddev->dmat, adev->dummy_page_map);
fail1:	bus_dmamem_free(adev->ddev->dmat, &adev->dummy_page_seg, 1);
fail0:	KASSERT(error);
	adev->dummy_page_map = NULL;
	adev->dummy_page_addr = 0; /* paranoia */
	/* XXX errno NetBSD->Linux */
	return -error;
#else  /* __NetBSD__ */
	struct page *dummy_page = ttm_bo_glob.dummy_read_page;

	if (adev->dummy_page_addr)
		return 0;
	adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		adev->dummy_page_addr = 0;
		return -ENOMEM;
	}
	return 0;
#endif	/* __NetBSD__ */
}

/**
 * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
	if (!adev->dummy_page_addr)
		return;
#ifdef __NetBSD__
	bus_dmamap_unload(adev->ddev->dmat, adev->dummy_page_map);
	bus_dmamap_destroy(adev->ddev->dmat, adev->dummy_page_map);
	bus_dmamem_free(adev->ddev->dmat, &adev->dummy_page_seg, 1);
	adev->dummy_page_map = NULL;
#else
	pci_unmap_page(adev->pdev, adev->dummy_page_addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
#endif
	adev->dummy_page_addr = 0;
}

/**
 * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		struct amdgpu_bo_param bp;

		memset(&bp, 0, sizeof(bp));
		bp.size = adev->gart.table_size;
		bp.byte_align = PAGE_SIZE;
		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
		bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		bp.type = ttm_bo_type_kernel;
		bp.resv = NULL;
		r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
		if (r) {
			return r;
		}
	}
	return 0;
}
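
/*
 * Lifecycle of the VRAM-resident table: amdgpu_gart_table_vram_alloc()
 * creates the BO, amdgpu_gart_table_vram_pin() pins it and kmaps it at
 * adev->gart.ptr (the pointer bind/unbind write through),
 * amdgpu_gart_table_vram_unpin() unmaps and unpins it, and
 * amdgpu_gart_table_vram_free() drops the reference.
 */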

/**
 * amdgpu_gart_table_vram_pin - pin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gart.bo, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r) {
		amdgpu_bo_unreserve(adev->gart.bo);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr);
	if (r)
		amdgpu_bo_unpin(adev->gart.bo);
	amdgpu_bo_unreserve(adev->gart.bo);
	return r;
}

/**
 * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->gart.bo, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->gart.bo);
		amdgpu_bo_unpin(adev->gart.bo);
		amdgpu_bo_unreserve(adev->gart.bo);
		adev->gart.ptr = NULL;
	}
}

/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
	if (adev->gart.bo == NULL) {
		return;
	}
	amdgpu_bo_unref(&adev->gart.bo);
}

#ifdef __NetBSD__
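/*
 * These helpers bracket CPU updates of the GART table.  When the
 * table lives in bus_dma system memory (ag_table_map != NULL),
 * POSTWRITE ends the previous device-read epoch so the CPU may
 * rewrite entries, and PREWRITE flushes the updated entries before
 * the GPU re-reads the table.  The HDP and per-vmhub TLB flushes then
 * make the GPU drop any stale translations.
 */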
static void
amdgpu_gart_pre_update(struct amdgpu_device *adev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{

	if (adev->gart.ag_table_map != NULL) {
		const unsigned entsize =
		    adev->gart.table_size / adev->gart.num_gpu_pages;

		bus_dmamap_sync(adev->ddev->dmat, adev->gart.ag_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_POSTWRITE);
	}
}

static void
amdgpu_gart_post_update(struct amdgpu_device *adev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{
	unsigned i;

	if (adev->gart.ag_table_map != NULL) {
		const unsigned entsize =
		    adev->gart.table_size / adev->gart.num_gpu_pages;

		bus_dmamap_sync(adev->ddev->dmat, adev->gart.ag_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_PREWRITE);
	}
	mb();			/* XXX why is bus_dmamap_sync not enough? */
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}
#endif

/*
 * Common gart functions.
 */
#ifdef __NetBSD__
int
amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t gpu_start,
    unsigned npages)
{
	const unsigned gpu_per_cpu = AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const uint64_t gpu_pgstart = (gpu_start / AMDGPU_GPU_PAGE_SIZE);
	const uint64_t pgstart __diagused = (gpu_pgstart / gpu_per_cpu);
	uint64_t pgno, gpu_pgno;
	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
	uint64_t flags = 0;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages <= adev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= adev->gart.num_gpu_pages);

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return -EINVAL;
	}
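	/*
	 * Rewrite every GPU page table entry in the range to point at
	 * the dummy page, so a stale GPU access hits a harmless
	 * scratch page instead of freed memory.
	 */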
	amdgpu_gart_pre_update(adev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[pgstart + pgno] = NULL;
#endif

		if (adev->gart.ptr == NULL)
			continue;
		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
			    gpu_pgstart + gpu_per_cpu*pgno + gpu_pgno,
			    adev->dummy_page_addr, flags);
		}
	}
	amdgpu_gart_post_update(adev, gpu_pgstart, gpu_npages);

	return 0;
}
#else  /* __NetBSD__ */
/**
 * amdgpu_gart_unbind - unbind pages from the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;
	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
	uint64_t flags = 0;

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[p] = NULL;
#endif
		page_base = adev->dummy_page_addr;
		if (!adev->gart.ptr)
			continue;

		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
					       t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);

	return 0;
}
#endif	/* __NetBSD__ */

/**
 * amdgpu_gart_map - map dma_addresses into GART entries
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 * @dst: CPU address of the gart table
 *
 * Map the dma_addresses into GART entries (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
#ifdef __NetBSD__
int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t gpu_start,
    unsigned npages, bus_size_t map_start, bus_dmamap_t dmamap, uint32_t flags,
    void *dst)
{
	bus_size_t seg_off = 0;
	unsigned i, j, t;

	CTASSERT(AMDGPU_GPU_PAGE_SIZE <= PAGE_SIZE);
	CTASSERT((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) == 0);

	KASSERT((gpu_start & (PAGE_SIZE - 1)) == 0);

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

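	/*
	 * Find the DMA segment containing byte offset map_start: on
	 * exit, i is that segment's index and seg_off the offset into
	 * it.  Each segment must be a whole number of pages (asserted
	 * below).
	 */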
	for (i = 0; i < dmamap->dm_nsegs; i++) {
		KASSERT((dmamap->dm_segs[i].ds_len & (PAGE_SIZE - 1)) == 0);
		if (map_start == 0)
			break;
		if (map_start < dmamap->dm_segs[i].ds_len) {
			seg_off = map_start;
			break;
		}
		map_start -= dmamap->dm_segs[i].ds_len;
	}
	KASSERT(i < dmamap->dm_nsegs);

	t = gpu_start / AMDGPU_GPU_PAGE_SIZE;

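	/*
	 * "npages --> 0" is just "npages-- > 0": walk the mapping in
	 * AMDGPU_GPU_PAGE_SIZE steps, moving to the next DMA segment
	 * whenever the current one is exhausted.
	 */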
	for (i = 0; npages --> 0;) {
		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			KASSERT(i < dmamap->dm_nsegs);
			KASSERT(seg_off < dmamap->dm_segs[i].ds_len);
			amdgpu_gmc_set_pte_pde(adev, dst, t,
			    dmamap->dm_segs[i].ds_addr + seg_off, flags);
			seg_off += AMDGPU_GPU_PAGE_SIZE;
			if (seg_off == dmamap->dm_segs[i].ds_len) {
				i++;
				seg_off = 0;
			}
		}
	}

	return 0;
}
#else
int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
		    int pages, dma_addr_t *dma_addr, uint64_t flags,
		    void *dst)
{
	uint64_t page_base;
	unsigned i, j, t;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < pages; i++) {
		page_base = dma_addr[i];
		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	return 0;
}
#endif	/* __NetBSD__ */

#ifdef __NetBSD__
int
amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t gpu_start,
    unsigned npages, struct page **pages, bus_dmamap_t dmamap, uint32_t flags)
{
	const unsigned gpu_per_cpu = AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const uint64_t gpu_pgstart = (gpu_start / AMDGPU_GPU_PAGE_SIZE);
	const uint64_t pgstart __diagused = (gpu_pgstart / gpu_per_cpu);
	uint64_t pgno, gpu_pgno;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages == dmamap->dm_nsegs);
	KASSERT(npages <= adev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= adev->gart.num_gpu_pages);

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	amdgpu_gart_pre_update(adev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
		const bus_addr_t addr = dmamap->dm_segs[pgno].ds_addr;

		KASSERT(dmamap->dm_segs[pgno].ds_len == PAGE_SIZE);
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[pgstart + pgno] = NULL;
#endif

		if (adev->gart.ptr == NULL)
			continue;

		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
			    gpu_pgstart + gpu_per_cpu*pgno + gpu_pgno,
			    addr + gpu_pgno*AMDGPU_GPU_PAGE_SIZE, flags);
		}
	}
	amdgpu_gart_post_update(adev, gpu_pgstart, gpu_npages);

	return 0;
}
#else  /* __NetBSD__ */
/**
 * amdgpu_gart_bind - bind pages into the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
		     uint64_t flags)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	unsigned t, p;
#endif
	int r, i;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++)
		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif

	if (!adev->gart.ptr)
		return 0;

	r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
		    adev->gart.ptr);
	if (r)
		return r;

	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
	return 0;
}
#endif
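
/*
 * A rough sketch of how the Linux TTM backend drives amdgpu_gart_bind()
 * (the real call site lives in amdgpu_ttm.c; names and details vary
 * between driver versions):
 *
 *	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
 *	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 *	    ttm->pages, gtt->ttm.dma_address, flags);
 */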

/**
 * amdgpu_gart_init - init the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->dummy_page_addr)
		return 0;

	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = amdgpu_gart_dummy_page_init(adev);
	if (r)
		return r;
	/* Compute table size */
	adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
	adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
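	/*
	 * For example, a 256 MiB GART with 4 KiB pages on both sides
	 * gives 65536 entries in each count; with 8 KiB CPU pages,
	 * num_cpu_pages would be half of num_gpu_pages.
	 */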

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* Allocate pages table */
	adev->gart.pages = vzalloc(array_size(sizeof(void *),
					      adev->gart.num_cpu_pages));
	if (adev->gart.pages == NULL)
		return -ENOMEM;
#endif

	return 0;
}

/**
 * amdgpu_gart_fini - tear down the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	vfree(adev->gart.pages);
	adev->gart.pages = NULL;
#endif
	amdgpu_gart_dummy_page_fini(adev);
}
    634