/* $NetBSD: i915_gem_object.h,v 1.5 2021/12/19 11:33:30 riastradh Exp $ */

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops,
                          struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
                             resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
                                       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                     struct sg_table *pages,
                                     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#if IS_ENABLED(CONFIG_LOCKDEP)
        WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
        return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
        if (obj && !kref_get_unless_zero(&obj->base.refcount))
                obj = NULL;

        return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
        struct drm_i915_gem_object *obj;

        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, handle);
        obj = i915_gem_object_get_rcu(obj);
        rcu_read_unlock();

        return obj;
}

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
        drm_gem_object_get(&obj->base);
        return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
        __drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
        dma_resv_lock(obj->base.resv, NULL);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
        return dma_resv_trylock(obj->base.resv);
}

static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
        return dma_resv_lock_interruptible(obj->base.resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
        dma_resv_unlock(obj->base.resv);
}

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
                                  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
        obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
        return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
        obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
                         unsigned long flags)
{
        return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}
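
/*
 * Usage sketch (illustrative only, not lifted from a caller in the driver):
 * how the lookup, locking and reference helpers above are typically combined
 * by an ioctl-style caller.  The "file" and "handle" arguments and the error
 * handling shown here are assumptions made for the sake of the example.
 *
 *      struct drm_i915_gem_object *obj;
 *      int err;
 *
 *      obj = i915_gem_object_lookup(file, handle);    (takes a reference)
 *      if (!obj)
 *              return -ENOENT;
 *
 *      err = i915_gem_object_lock_interruptible(obj); (dma_resv lock)
 *      if (err == 0) {
 *              ... operate on obj under its reservation lock ...
 *              i915_gem_object_unlock(obj);
 *      }
 *
 *      i915_gem_object_put(obj);                      (drop the reference)
 *      return err;
 */
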
static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
        return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
        GEM_BUG_ON(!tiling);
        return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
        return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
        return (i915_gem_object_get_stride(obj) *
                i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                               unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
                         unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
        I915_MM_NORMAL = 0,
        /*
         * Only used by struct_mutex, when called "recursively" from
         * direct-reclaim-esque paths. Safe because there is only ever one
         * struct_mutex in the entire system.
         */
        I915_MM_SHRINKER = 1,
        /*
         * Used for obj->mm.lock when allocating pages. Safe because the object
         * isn't yet on any LRU, and therefore the shrinker can't deadlock on
         * it. As soon as the object has pages, obj->mm.lock nests within
         * fs_reclaim.
         */
        I915_MM_GET_PAGES = 1,
};
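
/*
 * Usage sketch (an assumption based on the standard lockdep-subclass
 * convention, not quoted from the page-allocation code): callers that
 * populate obj->mm.pages are expected to take obj->mm.lock with the
 * I915_MM_GET_PAGES subclass, roughly as
 *
 *      err = mutex_lock_interruptible_nested(&obj->mm.lock,
 *                                            I915_MM_GET_PAGES);
 *      if (err)
 *              return err;
 *      ... allocate and install obj->mm.pages ...
 *      mutex_unlock(&obj->mm.lock);
 */
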
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

        if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
                return 0;

        return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
        return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
        return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
        I915_MAP_WB = 0,
        I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
        I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
        I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                                           enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}
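
/*
 * Usage sketch for the mapping helpers above (illustrative; the error
 * handling is an assumption, not taken from a particular caller):
 *
 *      void *vaddr;
 *
 *      vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *
 *      ... CPU access through vaddr ...
 *
 *      i915_gem_object_flush_map(obj);   (only needed after CPU writes)
 *      i915_gem_object_unpin_map(obj);
 */
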
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                                   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
                                 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
                                  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE  BIT(0)
#define CLFLUSH_AFTER   BIT(1)
#define CLFLUSH_FLAGS   (CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
        i915_gem_object_unlock(obj);
}

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
        struct intel_engine_cs *engine = NULL;
        struct dma_fence *fence;

        rcu_read_lock();
        fence = dma_resv_get_excl_rcu(obj->base.resv);
        rcu_read_unlock();

        if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
                engine = to_request(fence)->engine;
        dma_fence_put(fence);

        return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     const struct i915_ggtt_view *view,
                                     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (obj->cache_dirty)
                return false;

        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                return true;

        /* Currently in use by HW (display engine)? Keep flushed. */
        return i915_gem_object_is_framebuffer(obj);
}
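
/*
 * Sketch of the CPU-access pattern that the CLFLUSH_* flags above describe
 * (illustrative; the per-page copy loop is omitted):
 *
 *      unsigned int needs_clflush;
 *      int err;
 *
 *      err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *      if (err)
 *              return err;
 *
 *      ... flush caches before the copy if (needs_clflush & CLFLUSH_BEFORE),
 *      write the data, flush again if (needs_clflush & CLFLUSH_AFTER) ...
 *
 *      i915_gem_object_finish_access(obj);
 */
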
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        if (cpu_write_needs_clflush(obj))
                obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
                         unsigned int flags,
                         long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                         enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                              enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                  enum fb_op_origin origin)
{
        if (unlikely(rcu_access_pointer(obj->frontbuffer)))
                __i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                       enum fb_op_origin origin)
{
        if (unlikely(rcu_access_pointer(obj->frontbuffer)))
                __i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif