Home | History | Annotate | Line # | Download | only in radeon
      1  1.26  riastrad /*	$NetBSD: radeon_ttm.c,v 1.26 2022/07/20 01:22:38 riastradh Exp $	*/
      2  1.10  riastrad 
      3   1.1  riastrad /*
      4   1.1  riastrad  * Copyright 2009 Jerome Glisse.
      5   1.1  riastrad  * All Rights Reserved.
      6   1.1  riastrad  *
      7   1.1  riastrad  * Permission is hereby granted, free of charge, to any person obtaining a
      8   1.1  riastrad  * copy of this software and associated documentation files (the
      9   1.1  riastrad  * "Software"), to deal in the Software without restriction, including
     10   1.1  riastrad  * without limitation the rights to use, copy, modify, merge, publish,
     11   1.1  riastrad  * distribute, sub license, and/or sell copies of the Software, and to
     12   1.1  riastrad  * permit persons to whom the Software is furnished to do so, subject to
     13   1.1  riastrad  * the following conditions:
     14   1.1  riastrad  *
     15   1.1  riastrad  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16   1.1  riastrad  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17   1.1  riastrad  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     18   1.1  riastrad  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     19   1.1  riastrad  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     20   1.1  riastrad  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     21   1.1  riastrad  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     22   1.1  riastrad  *
     23   1.1  riastrad  * The above copyright notice and this permission notice (including the
     24   1.1  riastrad  * next paragraph) shall be included in all copies or substantial portions
     25   1.1  riastrad  * of the Software.
     26   1.1  riastrad  *
     27   1.1  riastrad  */
     28   1.1  riastrad /*
     29   1.1  riastrad  * Authors:
     30   1.1  riastrad  *    Jerome Glisse <glisse (at) freedesktop.org>
     31   1.1  riastrad  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
     32   1.1  riastrad  *    Dave Airlie
     33   1.1  riastrad  */
     34  1.18  riastrad 
     35  1.10  riastrad #include <sys/cdefs.h>
     36  1.26  riastrad __KERNEL_RCSID(0, "$NetBSD: radeon_ttm.c,v 1.26 2022/07/20 01:22:38 riastradh Exp $");
     37  1.10  riastrad 
     38  1.18  riastrad #include <linux/dma-mapping.h>
     39  1.18  riastrad #include <linux/pagemap.h>
     40  1.18  riastrad #include <linux/pci.h>
     41   1.1  riastrad #include <linux/seq_file.h>
     42   1.1  riastrad #include <linux/slab.h>
     43  1.18  riastrad #include <linux/swap.h>
     44   1.1  riastrad #include <linux/swiotlb.h>
     45  1.18  riastrad 
     46  1.18  riastrad #include <drm/drm_agpsupport.h>
     47  1.18  riastrad #include <drm/drm_debugfs.h>
     48  1.18  riastrad #include <drm/drm_device.h>
     49  1.18  riastrad #include <drm/drm_file.h>
     50  1.18  riastrad #include <drm/drm_prime.h>
     51  1.18  riastrad #include <drm/radeon_drm.h>
     52  1.18  riastrad #include <drm/ttm/ttm_bo_api.h>
     53  1.18  riastrad #include <drm/ttm/ttm_bo_driver.h>
     54  1.18  riastrad #include <drm/ttm/ttm_module.h>
     55  1.18  riastrad #include <drm/ttm/ttm_page_alloc.h>
     56  1.18  riastrad #include <drm/ttm/ttm_placement.h>
     57  1.18  riastrad 
     58   1.1  riastrad #include "radeon_reg.h"
     59   1.1  riastrad #include "radeon.h"
     60   1.1  riastrad 
     61   1.2  riastrad #ifdef __NetBSD__
     62   1.2  riastrad #include <uvm/uvm_extern.h>
     63   1.3  riastrad #include <uvm/uvm_fault.h>
     64   1.2  riastrad #include <uvm/uvm_param.h>
     65   1.2  riastrad #include <drm/bus_dma_hacks.h>
     66   1.2  riastrad #endif
     67   1.2  riastrad 
     68   1.1  riastrad static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
     69   1.1  riastrad static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
     70   1.1  riastrad 
     71   1.1  riastrad static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
     72   1.1  riastrad {
     73   1.1  riastrad 	struct radeon_mman *mman;
     74   1.1  riastrad 	struct radeon_device *rdev;
     75   1.1  riastrad 
     76   1.1  riastrad 	mman = container_of(bdev, struct radeon_mman, bdev);
     77   1.1  riastrad 	rdev = container_of(mman, struct radeon_device, mman);
     78   1.1  riastrad 	return rdev;
     79   1.1  riastrad }
     80   1.1  riastrad 
     81   1.1  riastrad static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
     82   1.1  riastrad {
     83   1.1  riastrad 	return 0;
     84   1.1  riastrad }
     85   1.1  riastrad 
/*
 * Describe each TTM memory pool (system, GTT, VRAM) for this device:
 * which caching modes it supports, whether the CPU can map it, and the
 * GPU address at which the pool begins.  Called by TTM at init time.
 *
 * Returns 0 on success, or -EINVAL for an unknown pool type or when
 * the pool requires AGP but AGP is not enabled.
 */
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		/* CMA: GTT pages are ordinary system pages, so they can be
		 * mapped directly rather than through an aperture. */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			if (!rdev->ddev->agp) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			/* AGP aperture must not be CPU-cached. */
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
    136   1.1  riastrad 
/*
 * Choose where a buffer object should be evicted to.  Non-radeon BOs
 * simply go to system memory.  For radeon BOs being evicted from VRAM,
 * prefer the CPU-invisible part of VRAM (cheap, keeps the BO on-card)
 * with GTT as the busy fallback; if no copy ring is ready, fall back
 * to CPU memory since the GPU cannot assist with the move.
 */
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	/* Default placement for non-radeon objects: any caching, system RAM. */
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			/* No copy ring: the GPU can't blit, evict to CPU. */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
					/* Constrain VRAM placements to the
					 * invisible region (>= fpfn). */
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					/* Non-VRAM (GTT) entry becomes the
					 * sole busy placement. */
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}
    192   1.1  riastrad 
    193   1.1  riastrad static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
    194   1.1  riastrad {
    195   1.1  riastrad 	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
    196   1.1  riastrad 
    197  1.10  riastrad 	if (radeon_ttm_tt_has_userptr(bo->ttm))
    198  1.10  riastrad 		return -EPERM;
    199  1.19  riastrad #ifdef __NetBSD__
    200  1.19  riastrad 	struct drm_file *drm_file = filp->f_data;
    201  1.20  riastrad 	return drm_vma_node_verify_access(&rbo->tbo.base.vma_node, drm_file);
    202  1.19  riastrad #else
    203  1.18  riastrad 	return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
    204  1.18  riastrad 					  filp->private_data);
    205  1.19  riastrad #endif
    206   1.1  riastrad }
    207   1.1  riastrad 
    208   1.1  riastrad static void radeon_move_null(struct ttm_buffer_object *bo,
    209   1.1  riastrad 			     struct ttm_mem_reg *new_mem)
    210   1.1  riastrad {
    211   1.1  riastrad 	struct ttm_mem_reg *old_mem = &bo->mem;
    212   1.1  riastrad 
    213   1.1  riastrad 	BUG_ON(old_mem->mm_node != NULL);
    214   1.1  riastrad 	*old_mem = *new_mem;
    215   1.1  riastrad 	new_mem->mm_node = NULL;
    216   1.1  riastrad }
    217   1.1  riastrad 
    218   1.1  riastrad static int radeon_move_blit(struct ttm_buffer_object *bo,
    219   1.1  riastrad 			bool evict, bool no_wait_gpu,
    220   1.1  riastrad 			struct ttm_mem_reg *new_mem,
    221   1.1  riastrad 			struct ttm_mem_reg *old_mem)
    222   1.1  riastrad {
    223   1.1  riastrad 	struct radeon_device *rdev;
    224   1.1  riastrad 	uint64_t old_start, new_start;
    225   1.1  riastrad 	struct radeon_fence *fence;
    226  1.10  riastrad 	unsigned num_pages;
    227   1.1  riastrad 	int r, ridx;
    228   1.1  riastrad 
    229   1.1  riastrad 	rdev = radeon_get_rdev(bo->bdev);
    230   1.1  riastrad 	ridx = radeon_copy_ring_index(rdev);
    231  1.10  riastrad 	old_start = (u64)old_mem->start << PAGE_SHIFT;
    232  1.10  riastrad 	new_start = (u64)new_mem->start << PAGE_SHIFT;
    233   1.1  riastrad 
    234   1.1  riastrad 	switch (old_mem->mem_type) {
    235   1.1  riastrad 	case TTM_PL_VRAM:
    236   1.1  riastrad 		old_start += rdev->mc.vram_start;
    237   1.1  riastrad 		break;
    238   1.1  riastrad 	case TTM_PL_TT:
    239   1.1  riastrad 		old_start += rdev->mc.gtt_start;
    240   1.1  riastrad 		break;
    241   1.1  riastrad 	default:
    242   1.1  riastrad 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
    243   1.1  riastrad 		return -EINVAL;
    244   1.1  riastrad 	}
    245   1.1  riastrad 	switch (new_mem->mem_type) {
    246   1.1  riastrad 	case TTM_PL_VRAM:
    247   1.1  riastrad 		new_start += rdev->mc.vram_start;
    248   1.1  riastrad 		break;
    249   1.1  riastrad 	case TTM_PL_TT:
    250   1.1  riastrad 		new_start += rdev->mc.gtt_start;
    251   1.1  riastrad 		break;
    252   1.1  riastrad 	default:
    253   1.1  riastrad 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
    254   1.1  riastrad 		return -EINVAL;
    255   1.1  riastrad 	}
    256   1.1  riastrad 	if (!rdev->ring[ridx].ready) {
    257   1.1  riastrad 		DRM_ERROR("Trying to move memory with ring turned off.\n");
    258   1.1  riastrad 		return -EINVAL;
    259   1.1  riastrad 	}
    260   1.1  riastrad 
    261   1.1  riastrad 	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
    262   1.1  riastrad 
    263  1.10  riastrad 	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
    264  1.18  riastrad 	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
    265  1.10  riastrad 	if (IS_ERR(fence))
    266  1.10  riastrad 		return PTR_ERR(fence);
    267  1.10  riastrad 
    268  1.18  riastrad 	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
    269   1.1  riastrad 	radeon_fence_unref(&fence);
    270   1.1  riastrad 	return r;
    271   1.1  riastrad }
    272   1.1  riastrad 
    273   1.1  riastrad static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
    274   1.1  riastrad 				bool evict, bool interruptible,
    275   1.1  riastrad 				bool no_wait_gpu,
    276   1.1  riastrad 				struct ttm_mem_reg *new_mem)
    277   1.1  riastrad {
    278  1.18  riastrad 	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
    279   1.1  riastrad 	struct ttm_mem_reg *old_mem = &bo->mem;
    280   1.1  riastrad 	struct ttm_mem_reg tmp_mem;
    281  1.10  riastrad 	struct ttm_place placements;
    282   1.1  riastrad 	struct ttm_placement placement;
    283   1.1  riastrad 	int r;
    284   1.1  riastrad 
    285   1.1  riastrad 	tmp_mem = *new_mem;
    286   1.1  riastrad 	tmp_mem.mm_node = NULL;
    287   1.1  riastrad 	placement.num_placement = 1;
    288   1.1  riastrad 	placement.placement = &placements;
    289   1.1  riastrad 	placement.num_busy_placement = 1;
    290   1.1  riastrad 	placement.busy_placement = &placements;
    291  1.10  riastrad 	placements.fpfn = 0;
    292  1.10  riastrad 	placements.lpfn = 0;
    293  1.10  riastrad 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
    294  1.18  riastrad 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
    295   1.1  riastrad 	if (unlikely(r)) {
    296   1.1  riastrad 		return r;
    297   1.1  riastrad 	}
    298   1.1  riastrad 
    299   1.1  riastrad 	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
    300   1.1  riastrad 	if (unlikely(r)) {
    301   1.1  riastrad 		goto out_cleanup;
    302   1.1  riastrad 	}
    303   1.1  riastrad 
    304  1.18  riastrad 	r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
    305   1.1  riastrad 	if (unlikely(r)) {
    306   1.1  riastrad 		goto out_cleanup;
    307   1.1  riastrad 	}
    308   1.1  riastrad 	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
    309   1.1  riastrad 	if (unlikely(r)) {
    310   1.1  riastrad 		goto out_cleanup;
    311   1.1  riastrad 	}
    312  1.18  riastrad 	r = ttm_bo_move_ttm(bo, &ctx, new_mem);
    313   1.1  riastrad out_cleanup:
    314   1.1  riastrad 	ttm_bo_mem_put(bo, &tmp_mem);
    315   1.1  riastrad 	return r;
    316   1.1  riastrad }
    317   1.1  riastrad 
    318   1.1  riastrad static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
    319   1.1  riastrad 				bool evict, bool interruptible,
    320   1.1  riastrad 				bool no_wait_gpu,
    321   1.1  riastrad 				struct ttm_mem_reg *new_mem)
    322   1.1  riastrad {
    323  1.18  riastrad 	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
    324   1.1  riastrad 	struct ttm_mem_reg *old_mem = &bo->mem;
    325   1.1  riastrad 	struct ttm_mem_reg tmp_mem;
    326   1.1  riastrad 	struct ttm_placement placement;
    327  1.10  riastrad 	struct ttm_place placements;
    328   1.1  riastrad 	int r;
    329   1.1  riastrad 
    330   1.1  riastrad 	tmp_mem = *new_mem;
    331   1.1  riastrad 	tmp_mem.mm_node = NULL;
    332   1.1  riastrad 	placement.num_placement = 1;
    333   1.1  riastrad 	placement.placement = &placements;
    334   1.1  riastrad 	placement.num_busy_placement = 1;
    335   1.1  riastrad 	placement.busy_placement = &placements;
    336  1.10  riastrad 	placements.fpfn = 0;
    337  1.10  riastrad 	placements.lpfn = 0;
    338  1.10  riastrad 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
    339  1.18  riastrad 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
    340   1.1  riastrad 	if (unlikely(r)) {
    341   1.1  riastrad 		return r;
    342   1.1  riastrad 	}
    343  1.18  riastrad 	r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
    344   1.1  riastrad 	if (unlikely(r)) {
    345   1.1  riastrad 		goto out_cleanup;
    346   1.1  riastrad 	}
    347   1.1  riastrad 	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
    348   1.1  riastrad 	if (unlikely(r)) {
    349   1.1  riastrad 		goto out_cleanup;
    350   1.1  riastrad 	}
    351   1.1  riastrad out_cleanup:
    352   1.1  riastrad 	ttm_bo_mem_put(bo, &tmp_mem);
    353   1.1  riastrad 	return r;
    354   1.1  riastrad }
    355   1.1  riastrad 
/*
 * Top-level TTM move callback.  Picks the cheapest strategy for the
 * requested transition: a no-op placement swap when no data needs to
 * move, a GPU blit (possibly staged through GTT) when a copy ring is
 * available, and a CPU memcpy as the last resort.
 *
 * Returns 0 on success or a negative errno.
 */
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Wait for any outstanding GPU use of the BO first. */
	r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (r)
		return r;

	/* Can't move a pinned BO */
	rbo = container_of(bo, struct radeon_bo, tbo);
	if (WARN_ON_ONCE(rbo->pin_count > 0))
		return -EINVAL;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		/* No backing pages yet: just adopt the new placement. */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
					ctx->no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
					    ctx->no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		/* Blit failed or unavailable: fall back to a CPU copy. */
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	return 0;
}
    418   1.1  riastrad 
/*
 * Fill in mem->bus with the information needed to CPU-map a placement:
 * bus base address, offset within the aperture, size, and whether the
 * mapping is I/O memory.  System memory needs no bus mapping; AGP GTT
 * and VRAM map through their respective apertures.
 *
 * Returns 0 on success, -EINVAL for unmappable pools or VRAM beyond
 * the CPU-visible window.
 */
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
			/* NetBSD maps via bus_space(9), which requires
			 * page-aligned base and offset. */
			KASSERTMSG((mem->bus.base & (PAGE_SIZE - 1)) == 0,
			    "agp aperture is not page-aligned: %" PRIx64 "",
			    (uint64_t)mem->bus.base);
			KASSERT((mem->bus.offset & (PAGE_SIZE - 1)) == 0);
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifndef __NetBSD__		/* alpha hose handled through bus_space(9) */
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap(mem->bus.base + mem->bus.offset,
						mem->bus.size);
		if (!mem->bus.addr)
			return -ENOMEM;

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
#endif
		KASSERTMSG((mem->bus.base & (PAGE_SIZE - 1)) == 0,
		    "mc aperture is not page-aligned: %" PRIx64 "",
		    (uint64_t)mem->bus.base);
		KASSERT((mem->bus.offset & (PAGE_SIZE - 1)) == 0);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
    493   1.1  riastrad 
/*
 * Inverse of radeon_ttm_io_mem_reserve().  Nothing was allocated by
 * the reserve path that needs releasing here, so this is a no-op.
 */
static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
    497   1.1  riastrad 
/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;	/* base TTM object w/ DMA addrs */
	struct radeon_device		*rdev;	/* owning device */
	u64				offset;	/* GPU offset once bound */

	/* userptr support: user VA of the backing pages, if any */
	uint64_t			userptr;
#ifdef __NetBSD__
	struct vmspace			*usermm; /* address space owning userptr */
#else
	struct mm_struct		*usermm; /* address space owning userptr */
#endif
	/* RADEON_GEM_USERPTR_* flags from the ioctl */
	uint32_t			userflags;
};
    514  1.10  riastrad 
/* prepare the sg table with the user pages */
/*
 * Pin the user pages backing a userptr TT object and make them
 * available for device DMA.  Must be called from the same address
 * space that registered the userptr; enforces the ANONONLY flag by
 * rejecting file-backed mappings.
 *
 * On NetBSD: wires the user range with uvm_vslock(), loads it into
 * the bus_dma map, and fills ttm->pages from the pmap.  On Linux:
 * pins pages with get_user_pages() and maps them via an sg table.
 *
 * Returns 0 on success or a negative errno; on failure all partial
 * state is unwound.
 */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
#ifndef __NetBSD__
	unsigned pinned = 0, nents;
#endif
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
#ifndef __NetBSD__
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
#endif

	/* Only the registering address space may pin the pages. */
#ifdef __NetBSD__
	if (curproc->p_vmspace != gtt->usermm)
		return -EPERM;
#else
	if (current->mm != gtt->usermm)
		return -EPERM;
#endif

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
#ifdef __NetBSD__
		/* XXX ???  TOCTOU, anyone?  */
		/* XXX should do range_test */
		struct vm_map_entry *entry;
		bool ok;
		vm_map_lock_read(&gtt->usermm->vm_map);
		ok = uvm_map_lookup_entry(&gtt->usermm->vm_map,
		    (vaddr_t)gtt->userptr, &entry);
		if (ok)
			/* reject object-backed (e.g. file) mappings and
			 * ranges that spill past the mapping's end */
			ok = !UVM_ET_ISOBJ(entry) && end <= entry->end;
		vm_map_unlock_read(&gtt->usermm->vm_map);
		if (!ok)
			return -EPERM;
#else
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
#endif
	}

#ifdef __NetBSD__
	struct iovec iov = {
		.iov_base = (void *)(vaddr_t)gtt->userptr,
		.iov_len = ttm->num_pages << PAGE_SHIFT,
	};
	struct uio uio = {
		.uio_iov = &iov,
		.uio_iovcnt = 1,
		.uio_offset = 0,
		.uio_resid = ttm->num_pages << PAGE_SHIFT,
		.uio_rw = (write ? UIO_READ : UIO_WRITE), /* XXX ??? */
		.uio_vmspace = gtt->usermm,
	};
	unsigned long i;

	/* Wire the relevant part of the user's address space.  */
	/* XXX What happens if user does munmap?  */
	/* XXX errno NetBSD->Linux */
	r = -uvm_vslock(gtt->usermm, (void *)(vaddr_t)gtt->userptr,
	    ttm->num_pages << PAGE_SHIFT,
	    (write ? VM_PROT_WRITE : VM_PROT_READ)); /* XXX ??? */
	if (r)
		goto fail0;

	/* Load it up for DMA.  */
	/* XXX errno NetBSD->Linux */
	r = -bus_dmamap_load_uio(rdev->ddev->dmat, gtt->ttm.dma_address, &uio,
	    BUS_DMA_WAITOK);
	if (r)
		goto fail1;

	/* Get each of the pages as ttm requests.  */
	for (i = 0; i < ttm->num_pages; i++) {
		vaddr_t va = (vaddr_t)gtt->userptr + (i << PAGE_SHIFT);
		paddr_t pa;
		struct vm_page *vmp;

		/* pages are wired, so pmap_extract should succeed; if
		 * it doesn't, bail out and unwind */
		if (!pmap_extract(gtt->usermm->vm_map.pmap, va, &pa)) {
			r = -EFAULT;
			goto fail2;
		}
		vmp = PHYS_TO_VM_PAGE(pa);
		ttm->pages[i] = container_of(vmp, struct page, p_vmp);
	}

	/* Success!  */
	return 0;

fail2:	while (i --> 0)
		ttm->pages[i] = NULL; /* paranoia */
	bus_dmamap_unload(rdev->ddev->dmat, gtt->ttm.dma_address);
fail1:	uvm_vsunlock(gtt->usermm, (void *)(vaddr_t)gtt->userptr,
	    ttm->num_pages << PAGE_SHIFT);
fail0:	return r;
#else
	/* get_user_pages() may pin fewer pages than asked for; loop
	 * until the whole range is pinned */
	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
				   pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	/* mirror the sg entries into ttm->pages / dma_address arrays */
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned);
	return r;
#endif
}
    657   1.1  riastrad 
/*
 * Undo radeon_ttm_tt_pin_userptr: release the DMA mapping and the
 * user pages backing a userptr ttm_tt.
 */
static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
#ifdef __NetBSD__
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;

	/* Unload the DMA map, then unwire the user's address range.  */
	bus_dmamap_unload(rdev->ddev->dmat, gtt->ttm.dma_address);
	uvm_vsunlock(gtt->usermm, (void *)(vaddr_t)gtt->userptr,
	    ttm->num_pages << PAGE_SHIFT);
#else
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	/* Device may have written to the pages unless mapped read-only. */
	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/* Propagate GPU writes to the VM before dropping the pin. */
		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
#endif
}
    695   1.1  riastrad 
    696   1.1  riastrad static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
    697   1.1  riastrad 				   struct ttm_mem_reg *bo_mem)
    698   1.1  riastrad {
    699   1.1  riastrad 	struct radeon_ttm_tt *gtt = (void*)ttm;
    700  1.10  riastrad 	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
    701  1.10  riastrad 		RADEON_GART_PAGE_WRITE;
    702   1.1  riastrad 	int r;
    703   1.1  riastrad 
    704  1.10  riastrad 	if (gtt->userptr) {
    705  1.10  riastrad 		radeon_ttm_tt_pin_userptr(ttm);
    706  1.10  riastrad 		flags &= ~RADEON_GART_PAGE_WRITE;
    707  1.10  riastrad 	}
    708  1.10  riastrad 
    709   1.1  riastrad 	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
    710   1.1  riastrad 	if (!ttm->num_pages) {
    711   1.1  riastrad 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
    712   1.1  riastrad 		     ttm->num_pages, bo_mem, ttm);
    713   1.1  riastrad 	}
    714  1.10  riastrad 	if (ttm->caching_state == tt_cached)
    715  1.10  riastrad 		flags |= RADEON_GART_PAGE_SNOOP;
    716  1.10  riastrad 	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
    717  1.10  riastrad 			     ttm->pages, gtt->ttm.dma_address, flags);
    718   1.1  riastrad 	if (r) {
    719   1.1  riastrad 		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
    720   1.1  riastrad 			  ttm->num_pages, (unsigned)gtt->offset);
    721   1.1  riastrad 		return r;
    722   1.1  riastrad 	}
    723   1.1  riastrad 	return 0;
    724   1.1  riastrad }
    725   1.1  riastrad 
    726   1.1  riastrad static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
    727   1.1  riastrad {
    728   1.1  riastrad 	struct radeon_ttm_tt *gtt = (void *)ttm;
    729   1.1  riastrad 
    730   1.1  riastrad 	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
    731  1.10  riastrad 
    732  1.10  riastrad 	if (gtt->userptr)
    733  1.10  riastrad 		radeon_ttm_tt_unpin_userptr(ttm);
    734  1.10  riastrad 
    735   1.1  riastrad 	return 0;
    736   1.1  riastrad }
    737   1.1  riastrad 
    738   1.1  riastrad static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
    739   1.1  riastrad {
    740   1.1  riastrad 	struct radeon_ttm_tt *gtt = (void *)ttm;
    741   1.1  riastrad 
    742   1.1  riastrad 	ttm_dma_tt_fini(&gtt->ttm);
    743   1.1  riastrad 	kfree(gtt);
    744   1.1  riastrad }
    745   1.1  riastrad 
/* ttm_tt backend ops for GART-backed (non-AGP) ttm_tt objects. */
static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};
    751   1.1  riastrad 
    752  1.18  riastrad static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
    753  1.18  riastrad 					   uint32_t page_flags)
    754   1.1  riastrad {
    755   1.1  riastrad 	struct radeon_device *rdev;
    756   1.1  riastrad 	struct radeon_ttm_tt *gtt;
    757   1.1  riastrad 
    758  1.18  riastrad 	rdev = radeon_get_rdev(bo->bdev);
    759  1.10  riastrad #if IS_ENABLED(CONFIG_AGP)
    760   1.1  riastrad 	if (rdev->flags & RADEON_IS_AGP) {
    761  1.18  riastrad 		return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
    762  1.18  riastrad 					 page_flags);
    763   1.1  riastrad 	}
    764   1.1  riastrad #endif
    765   1.1  riastrad 
    766   1.1  riastrad 	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
    767   1.1  riastrad 	if (gtt == NULL) {
    768   1.1  riastrad 		return NULL;
    769   1.1  riastrad 	}
    770   1.1  riastrad 	gtt->ttm.ttm.func = &radeon_backend_func;
    771   1.1  riastrad 	gtt->rdev = rdev;
    772  1.18  riastrad 	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
    773   1.1  riastrad 		kfree(gtt);
    774   1.1  riastrad 		return NULL;
    775   1.1  riastrad 	}
    776   1.1  riastrad 	return &gtt->ttm.ttm;
    777   1.1  riastrad }
    778   1.1  riastrad 
    779  1.10  riastrad static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
    780  1.10  riastrad {
    781  1.10  riastrad 	if (!ttm || ttm->func != &radeon_backend_func)
    782  1.10  riastrad 		return NULL;
    783  1.10  riastrad 	return (struct radeon_ttm_tt *)ttm;
    784  1.10  riastrad }
    785  1.10  riastrad 
/*
 * Populate the ttm_tt with backing pages.  Four cases, in order:
 * userptr objects (pages come from the user's address space at bind
 * time), dma-buf slaves (pages come from an imported sg table), AGP,
 * and the ordinary page-pool path.
 */
static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
			struct ttm_operation_ctx *ctx)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
#if !defined(__NetBSD__) || IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev;
#endif
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
#ifdef __NetBSD__
		/* NetBSD tracks userptr pages via bus_dma, not an sg table. */
		ttm->sg = NULL;
#else
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;
#endif

		/* Mark as externally backed; actual pinning happens at bind. */
		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
#ifdef __NetBSD__
		/* Load the imported sg table into our DMA map. */
		int r = drm_prime_bus_dmamap_load_sgt(ttm->bdev->dmat,
		    gtt->ttm.dma_address, ttm->sg);
		if (r)
			return r;
#else
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
#endif
		ttm->state = tt_unbound;
		return 0;
	}

#if !defined(__NetBSD__) || IS_ENABLED(CONFIG_AGP)
	rdev = radeon_get_rdev(ttm->bdev);
#endif
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm, ctx);
	}
#endif

#ifdef __NetBSD__
	/* XXX errno NetBSD->Linux */
	return ttm_bus_dma_populate(&gtt->ttm);
#else

#ifdef CONFIG_SWIOTLB
	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
	}
#endif

	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
#endif
}
    846   1.1  riastrad 
/*
 * Release the ttm_tt's backing pages; the exact inverse of
 * radeon_ttm_tt_populate for each of its four cases.
 */
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
#if !defined(__NetBSD__) || IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev;
#endif
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

#ifdef __NetBSD__
	/* Undo the dmamap load done for imported sg tables in populate. */
	if (slave && ttm->sg) {
		bus_dmamap_unload(ttm->bdev->dmat, gtt->ttm.dma_address);
	}
#endif
	if (gtt && gtt->userptr) {
		/* kfree(NULL) is fine for the NetBSD path (sg was NULL). */
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	/* Slave (dma-buf) pages belong to the exporter; nothing to free. */
	if (slave)
		return;

#if !defined(__NetBSD__) || IS_ENABLED(CONFIG_AGP)
	rdev = radeon_get_rdev(ttm->bdev);
#endif
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef __NetBSD__
	ttm_bus_dma_unpopulate(&gtt->ttm);
	return;
#else

#ifdef CONFIG_SWIOTLB
	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
#endif
}
    894   1.1  riastrad 
#ifdef __NetBSD__
/* Hand the ttm_dma_tt's pages back for swapping (NetBSD-only hook). */
static void radeon_ttm_tt_swapout(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = container_of(ttm, struct radeon_ttm_tt,
	    ttm.ttm);
	struct ttm_dma_tt *ttm_dma = &gtt->ttm;

	ttm_bus_dma_swapout(ttm_dma);
}

static int	radeon_ttm_fault(struct uvm_faultinfo *, vaddr_t,
		    struct vm_page **, int, int, vm_prot_t, int);

/*
 * UVM pager ops for radeon buffer objects: generic ttm reference/
 * detach, plus our fault handler that serializes against reclocking.
 */
static const struct uvm_pagerops radeon_uvm_ops = {
	.pgo_reference = &ttm_bo_uvm_reference,
	.pgo_detach = &ttm_bo_uvm_detach,
	.pgo_fault = &radeon_ttm_fault,
};
#endif
    914   1.3  riastrad 
/*
 * Configure a ttm_tt to be backed by user memory at addr in the
 * calling process's address space.  flags is RADEON_GEM_USERPTR_*.
 * Returns -EINVAL for non-radeon ttm_tt objects.
 */
int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
#ifdef __NetBSD__
	/* Remember the caller's VM space so pin/unpin target it later. */
	gtt->usermm = curproc->p_vmspace;
#else
	gtt->usermm = current->mm;
#endif
	gtt->userflags = flags;
	return 0;
}
    932  1.10  riastrad 
    933  1.10  riastrad bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
    934  1.10  riastrad {
    935  1.10  riastrad 	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
    936  1.10  riastrad 
    937  1.10  riastrad 	if (gtt == NULL)
    938  1.10  riastrad 		return false;
    939  1.10  riastrad 
    940  1.10  riastrad 	return !!gtt->userptr;
    941  1.10  riastrad }
    942  1.10  riastrad 
    943  1.10  riastrad bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
    944  1.10  riastrad {
    945  1.10  riastrad 	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
    946  1.10  riastrad 
    947  1.10  riastrad 	if (gtt == NULL)
    948  1.10  riastrad 		return false;
    949  1.10  riastrad 
    950  1.10  riastrad 	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
    951  1.10  riastrad }
    952  1.10  riastrad 
/* TTM driver hooks wiring radeon into the TTM buffer-object machinery. */
static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
#ifdef __NetBSD__
	.ttm_tt_swapout = &radeon_ttm_tt_swapout,
	.ttm_uvm_ops = &radeon_uvm_ops,
#endif
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};
    972   1.1  riastrad 
/*
 * Initialize TTM for this device: the bo_device, the VRAM and GTT
 * heaps, a small pinned VRAM buffer, and the debugfs entries.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error returns after mman.initialized is set leave the
 * device partially initialized; presumably radeon_ttm_fini cleans that
 * up on the driver's error path — confirm against the caller.
 */
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	/* No others user of address space so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       &radeon_bo_driver,
#ifdef __NetBSD__
			       rdev->ddev->bst,
			       rdev->ddev->dmat,
#else
			       rdev->ddev->anon_inode->i_mapping,
#endif
			       rdev->ddev->vma_offset_manager,
			       dma_addressing_limited(pci_dev_dev(rdev->pdev)));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	/* Create the VRAM placement heap, sized to real VRAM. */
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
				rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	/* 256 KiB VRAM buffer, kept pinned (stolen VGA memory region). */
	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stolen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stolen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stolen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
	/* Create the GTT (GART) placement heap. */
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
				rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}
   1035   1.1  riastrad 
/*
 * Tear down everything radeon_ttm_init set up: debugfs, the pinned
 * stolen-VGA buffer, both placement heaps, the bo_device, and the GART.
 * Safe to call when init never ran (checks mman.initialized).
 */
void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stolen_vga_memory) {
		r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
		/* If reserve fails we still drop our reference below. */
		if (r == 0) {
			radeon_bo_unpin(rdev->stolen_vga_memory);
			radeon_bo_unreserve(rdev->stolen_vga_memory);
		}
		radeon_bo_unref(&rdev->stolen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}
   1058   1.1  riastrad 
   1059   1.1  riastrad /* this should only be called at bootup or when userspace
   1060   1.1  riastrad  * isn't running */
   1061   1.1  riastrad void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
   1062   1.1  riastrad {
   1063   1.1  riastrad 	struct ttm_mem_type_manager *man;
   1064   1.1  riastrad 
   1065   1.1  riastrad 	if (!rdev->mman.initialized)
   1066   1.1  riastrad 		return;
   1067   1.1  riastrad 
   1068   1.1  riastrad 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
   1069   1.1  riastrad 	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
   1070   1.1  riastrad 	man->size = size >> PAGE_SHIFT;
   1071   1.1  riastrad }
   1072   1.1  riastrad 
   1073   1.2  riastrad #ifdef __NetBSD__
   1074   1.2  riastrad 
/*
 * UVM fault handler for radeon buffer objects (NetBSD path).  Takes
 * pm.mclk_lock shared so faults do not race with reclocking, then
 * defers to the generic ttm fault handler.
 */
static int
radeon_ttm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct radeon_device *const rdev = radeon_get_rdev(bo->bdev);
	int error;

	KASSERT(rdev != NULL);
	down_read(&rdev->pm.mclk_lock);
	error = ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx,
	    access_type, flags);
	up_read(&rdev->pm.mclk_lock);

	return error;
}
   1094   1.2  riastrad 
/*
 * Map a page-aligned range of the device's BO address space to a UVM
 * object for mmap (NetBSD path).  Returns 0 and fills *uobjp/*uoffsetp
 * on success, negative errno on failure.
 */
int
radeon_mmap_object(struct drm_device *dev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	struct radeon_device *rdev = dev->dev_private;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));

	if (__predict_false(rdev == NULL))	/* XXX How?? */
		return -EINVAL;

	return ttm_bo_mmap_object(&rdev->mman.bdev, offset, size, prot,
	    uobjp, uoffsetp, file);
}
   1110   1.2  riastrad 
   1111   1.2  riastrad #else
   1112   1.2  riastrad 
/*
 * Page-fault handler for mapped radeon BOs (Linux path).  Holds
 * pm.mclk_lock shared across the generic ttm fault so reclocking
 * cannot move VRAM out from under the fault.
 */
static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	vm_fault_t ret;

	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
	if (bo == NULL)
		return VM_FAULT_NOPAGE;

	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	ret = ttm_bo_vm_fault(vmf);
	up_read(&rdev->pm.mclk_lock);
	return ret;
}
   1129   1.1  riastrad 
/* VMA ops for radeon mmaps: our fault wrapper plus generic ttm hooks. */
static struct vm_operations_struct radeon_ttm_vm_ops = {
	.fault = radeon_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
   1136  1.18  riastrad 
   1137   1.1  riastrad int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
   1138   1.1  riastrad {
   1139   1.1  riastrad 	int r;
   1140  1.18  riastrad 	struct drm_file *file_priv = filp->private_data;
   1141  1.18  riastrad 	struct radeon_device *rdev = file_priv->minor->dev->dev_private;
   1142   1.1  riastrad 
   1143  1.18  riastrad 	if (rdev == NULL)
   1144  1.10  riastrad 		return -EINVAL;
   1145   1.1  riastrad 
   1146   1.1  riastrad 	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
   1147  1.18  riastrad 	if (unlikely(r != 0))
   1148   1.1  riastrad 		return r;
   1149  1.18  riastrad 
   1150   1.1  riastrad 	vma->vm_ops = &radeon_ttm_vm_ops;
   1151   1.1  riastrad 	return 0;
   1152   1.1  riastrad }
   1153   1.1  riastrad 
   1154   1.2  riastrad #endif	/* __NetBSD__ */
   1155   1.2  riastrad 
   1156   1.1  riastrad #if defined(CONFIG_DEBUG_FS)
   1157   1.1  riastrad 
   1158   1.1  riastrad static int radeon_mm_dump_table(struct seq_file *m, void *data)
   1159   1.1  riastrad {
   1160   1.1  riastrad 	struct drm_info_node *node = (struct drm_info_node *)m->private;
   1161  1.18  riastrad 	unsigned ttm_pl = *(int*)node->info_ent->data;
   1162   1.1  riastrad 	struct drm_device *dev = node->minor->dev;
   1163   1.1  riastrad 	struct radeon_device *rdev = dev->dev_private;
   1164  1.18  riastrad 	struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
   1165  1.18  riastrad 	struct drm_printer p = drm_seq_file_printer(m);
   1166  1.18  riastrad 
   1167  1.18  riastrad 	man->func->debug(man, &p);
   1168  1.18  riastrad 	return 0;
   1169   1.1  riastrad }
   1170   1.1  riastrad 
   1171  1.18  riastrad 
/* Placement ids handed to radeon_mm_dump_table via info_ent->data. */
static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

/* debugfs entries; the swiotlb one is dropped at runtime if unused. */
static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};
   1183   1.1  riastrad 
   1184   1.1  riastrad static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
   1185   1.1  riastrad {
   1186   1.1  riastrad 	struct radeon_device *rdev = inode->i_private;
   1187   1.1  riastrad 	i_size_write(inode, rdev->mc.mc_vram_size);
   1188   1.1  riastrad 	filep->private_data = inode->i_private;
   1189   1.1  riastrad 	return 0;
   1190   1.1  riastrad }
   1191   1.1  riastrad 
/*
 * Read VRAM through the MM_INDEX/MM_DATA register window, one dword
 * at a time.  Offset and size must be dword-aligned.
 */
static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		/* Index/data pair must be used atomically w.r.t. other users. */
		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		/* Bit 31 apparently selects VRAM access — confirm vs. regs. */
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
   1228   1.1  riastrad 
/* File ops for the read-only radeon_vram debugfs node. */
static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};
   1235   1.1  riastrad 
   1236   1.1  riastrad static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
   1237   1.1  riastrad {
   1238   1.1  riastrad 	struct radeon_device *rdev = inode->i_private;
   1239   1.1  riastrad 	i_size_write(inode, rdev->mc.gtt_size);
   1240   1.1  riastrad 	filep->private_data = inode->i_private;
   1241   1.1  riastrad 	return 0;
   1242   1.1  riastrad }
   1243   1.1  riastrad 
/*
 * Read GTT contents page by page via kmap; pages with no backing
 * (unbound GART entries) read back as zeros.
 */
static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		/* Clamp each chunk to the remainder of the current page. */
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}
   1282   1.1  riastrad 
/*
 * File operations for the "radeon_gtt" debugfs node: a read-only,
 * seekable dump of GTT-backed memory (open sets the file size, read
 * walks the GART page by page).
 */
static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};
   1289   1.1  riastrad 
   1290   1.1  riastrad #endif
   1291   1.1  riastrad 
/*
 * Create the TTM debugfs entries for this device: the raw
 * "radeon_vram" and "radeon_gtt" dump files under the primary minor's
 * debugfs root, plus the radeon_ttm_debugfs_list table.  Returns the
 * result of radeon_debugfs_add_files(), or 0 when debugfs is not
 * configured.
 */
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *root = minor->debugfs_root;

	/* Read-only (S_IRUGO) dump of VRAM contents. */
	rdev->mman.vram = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO,
					      root, rdev,
					      &radeon_ttm_vram_fops);

	/* Read-only dump of GTT-backed memory. */
	rdev->mman.gtt = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO,
					     root, rdev, &radeon_ttm_gtt_fops);

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	/* NOTE(review): dropping the final list entry when swiotlb is not
	 * in use — presumably that entry is the swiotlb dump; confirm
	 * against radeon_ttm_debugfs_list's definition. */
	if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

	return 0;
#endif
}
   1320   1.1  riastrad 
/*
 * Tear down the debugfs files created by radeon_ttm_debugfs_init()
 * and clear the cached handles.  Compiled out entirely when debugfs
 * is not configured.
 */
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	/* The two entries are independent; remove each and NULL it out. */
	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;

	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;
#endif
}
   1332