/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_OBJECT_H__
#define __RADEON_OBJECT_H__

#include <drm/radeon_drm.h>
#include "radeon.h"

/**
 * radeon_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns the GEM domain corresponding to the given ttm mem_type.
 */
static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return RADEON_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return RADEON_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	return 0;
}
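
/*
 * Usage sketch (illustrative, not part of the original header): the common
 * caller pattern is to translate a buffer's current TTM placement back into
 * a GEM domain flag, e.g.:
 *
 *	domain = radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 */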

/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}
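
/*
 * Typical use (an illustrative sketch, not part of the original header):
 * callers pair radeon_bo_reserve() with radeon_bo_unreserve() around any
 * access that needs the reservation lock and propagate -ERESTARTSYS back to
 * user-space:
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	... access placement, map, pin, etc. ...
 *	radeon_bo_unreserve(bo);
 */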

static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

/**
 * radeon_bo_gpu_offset - return GPU offset of bo
 * @bo:	radeon object for which we query the offset
 *
 * Returns the current GPU offset of the object.
 *
 * Note: the object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
{
	return bo->tbo.offset;
}

static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
}

static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
}
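
/*
 * Note (added for clarity, assuming the 4 KiB RADEON_GPU_PAGE_SIZE defined in
 * radeon.h): with 4 KiB CPU pages the CPU and GPU page counts of a bo
 * coincide, e.g. a 64 KiB bo has tbo.num_pages == 16 and
 * radeon_bo_ngpu_pages() == 16; with 64 KiB CPU pages the same bo would have
 * tbo.num_pages == 1 but still span 16 GPU pages.
 */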

/**
 * radeon_bo_mmap_offset - return mmap offset of bo
 * @bo:	radeon object for which we query the offset
 *
 * Returns the mmap offset of the object.
 */
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
			  bool no_wait);

extern int radeon_bo_create(struct radeon_device *rdev,
			    unsigned long size, int byte_align,
			    bool kernel, u32 domain,
			    struct sg_table *sg,
			    struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
				    u64 max_offset, u64 *gpu_addr);
extern int radeon_bo_unpin(struct radeon_bo *bo);
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern int radeon_bo_list_validate(struct radeon_device *rdev,
				   struct ww_acquire_ctx *ticket,
				   struct list_head *head, int ring);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
				struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				u32 tiling_flags, u32 pitch);
extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				u32 *tiling_flags, u32 *pitch);
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *new_mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
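
/*
 * Illustrative object lifecycle built from the declarations above (a sketch
 * under assumed context: a valid rdev and a byte size; error handling and
 * some return-value checks abbreviated):
 *
 *	struct radeon_bo *bo;
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
 *	if (r)
 *		return r;
 *	r = radeon_bo_reserve(bo, false);
 *	if (likely(r == 0)) {
 *		r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *		radeon_bo_unreserve(bo);
 *	}
 *	...
 *	radeon_bo_reserve(bo, false);
 *	radeon_bo_unpin(bo);
 *	radeon_bo_unreserve(bo);
 *	radeon_bo_unref(&bo);
 */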

/*
 * sub allocation
 */

static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
{
	return (char *)sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
				     struct radeon_sa_manager *sa_manager,
				     unsigned size, u32 align, u32 domain);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
				      struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
				      struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
					struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_new(struct radeon_device *rdev,
			    struct radeon_sa_manager *sa_manager,
			    struct radeon_sa_bo **sa_bo,
			    unsigned size, unsigned align);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
			      struct radeon_sa_bo **sa_bo,
			      struct radeon_fence *fence);
#if defined(CONFIG_DEBUG_FS)
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
					 struct seq_file *m);
#endif
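
/*
 * Sub-allocator usage sketch (illustrative only; the pool size, alignment and
 * domain below are arbitrary example values, and allocation assumes the
 * manager's backing bo has been pinned via radeon_sa_bo_manager_start()):
 *
 *	struct radeon_sa_manager sa_manager;
 *	struct radeon_sa_bo *sa_bo;
 *	int r;
 *
 *	r = radeon_sa_bo_manager_init(rdev, &sa_manager, 64 * 1024,
 *				      256, RADEON_GEM_DOMAIN_GTT);
 *	if (r == 0)
 *		r = radeon_sa_bo_new(rdev, &sa_manager, &sa_bo, 4096, 256);
 *	...
 *	radeon_sa_bo_free(rdev, &sa_bo, fence);
 *	radeon_sa_bo_manager_fini(rdev, &sa_manager);
 *
 * Here fence is the radeon_fence guarding the last GPU use of the allocation
 * (NULL if there is none).
 */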

#endif /* __RADEON_OBJECT_H__ */