/*	$NetBSD: intel_gtt.h,v 1.13 2022/08/20 23:19:09 riastradh Exp $	*/

/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_gem_fence_reg.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"

#ifdef __NetBSD__
#include <drm/bus_dma_hacks.h>
#include <x86/machdep.h>
#include <machine/pte.h>
#define _PAGE_PRESENT	PTE_P	/* 0x01 PTE is present / valid */
#define _PAGE_RW	PTE_W	/* 0x02 read/write */
#define _PAGE_PWT	PTE_PWT	/* 0x08 write-through */
#define _PAGE_PCD	PTE_PCD	/* 0x10 page cache disabled / non-cacheable */
#define _PAGE_PAT	PTE_PAT	/* 0x80 page attribute table on PTE */
#endif

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE	I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE	I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK	-I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT	I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE	-1
#define I915_MAX_NUM_FENCES	32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS	6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)	((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)	(I915_PTES(pte_len) - 1)
#define I915_PDES		512
#define I915_PDE_MASK		(I915_PDES - 1)

/* gen6-hsw has bits 11:4 for physical addr bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
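
/*
 * Worked example: a gen6 PTE is only 32 bits wide, so physical address
 * bits 39:32 are folded into PTE bits 11:4.  For addr = 0x200001000
 * (bit 33 set), ((addr >> 28) & 0xff0) == 0x020, so the low 32 bits of
 * the encoding are 0x00001020: page frame 0x1000 with bit 33 of the
 * address stored in PTE bit 5.
 */
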
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
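
/*
 * Worked example: HSW_CACHEABILITY_CONTROL(0xb) places the low three bits
 * (0b011) at PTE bits 3:1, giving 0x6, and the fourth bit at PTE bit 11,
 * giving 0x800, so the full encoding is 0x806 (HSW_WB_ELLC_LLC_AGE0 below).
 */
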
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference compared to a normal x86 3 level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
 * PML4E | PDPE  | PDE   | PTE   | offset
 */
#define GEN8_3LVL_PDPES			4
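
/*
 * Illustrative index extraction for the 48b layout above; every level
 * holds 512 entries, so each index is 9 bits wide:
 *
 *	pml4e = (addr >> 39) & 0x1ff;
 *	pdpe  = (addr >> 30) & 0x1ff;
 *	pde   = (addr >> 21) & 0x1ff;
 *	pte   = (addr >> 12) & 0x1ff;
 */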

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
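
/*
 * Illustrative example: the private PAT is programmed as eight 8-bit
 * entries packed into one 64-bit value, e.g.
 *
 *	u64 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
 *		  GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
 *
 * puts a write-back LLC entry in slot 0 and a write-combining entry in
 * slot 1.
 */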

#define GEN8_PDE_IPS_64K		BIT(11)
#define GEN8_PDE_PS_2M			BIT(7)

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_dma {
	struct page *page;
#ifdef __NetBSD__
	union {
		bus_dma_segment_t seg;
		uint32_t ggtt_offset;
	};
	bus_dmamap_t map;
#else
	union {
		dma_addr_t daddr;

		/*
		 * For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		u32 ggtt_offset;
	};
#endif
};

struct i915_page_scratch {
	struct i915_page_dma base;
	u64 encode;
};

struct i915_page_table {
	struct i915_page_dma base;
	atomic_t used;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void *entry[512];
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)__UNCONST(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct i915_page_dma *, __x, \
	__px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
	__px_choose_expr(px, struct i915_page_table *, &__x->base, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
	(void)0))))
#ifdef __NetBSD__
#define px_dma(px) (px_base(px)->map->dm_segs[0].ds_addr)
#else
#define px_dma(px) (px_base(px)->daddr)
#endif

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)
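
/*
 * The px_*() macros above use __builtin_choose_expr() to dispatch on the
 * static type of their argument, so the same helper works on any of the
 * page-table structures:
 *
 *	struct i915_page_directory *pd;
 *	dma_addr_t addr = px_dma(pd);	-- resolves through &pd->pt.base
 *	struct i915_page_table *pt;
 *	addr = px_dma(pt);		-- resolves through &pt->base
 */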

enum i915_cache_level;

struct drm_i915_file_private;
struct drm_i915_gem_object;
struct i915_vma;
struct intel_gt;

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

struct pagestash {
#ifndef __NetBSD__
	spinlock_t lock;
	struct pagevec pvec;
#endif
};

void stash_init(struct pagestash *stash);

struct i915_address_space {
	struct kref ref;
	struct rcu_work rcu;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
#ifdef __NetBSD__
	bus_dma_tag_t dmat;
#else
	struct device *dma;
#endif

	/*
	 * Every address space belongs to a struct file - except for the
	 * global GTT that is owned by the driver (and so @file is set to
	 * NULL). In principle, no information should leak from one context
	 * to another (or between files/processes etc) unless explicitly
	 * shared by the owner. Tracking the owner is important in order to
	 * free up per-file objects along with the file, to aid resource
	 * tracking, and to assign blame.
	 */
	struct drm_i915_file_private *file;
	u64 total;	/* size addr space maps (ex. 2GB for ggtt) */
	u64 reserved;	/* size addr space reserved */

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shutdown our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1

	struct i915_page_scratch scratch[4];
	unsigned int scratch_order;
	unsigned int top;

	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

#ifndef __NetBSD__
	struct pagestash free_pages;
#endif

	/* Global GTT */
	bool is_ggtt:1;

	/* Some systems require uncached updates of the page directories */
	bool pt_kmap_wc:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)

	int (*allocate_va_range)(struct i915_address_space *vm,
				 u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware
 * translates a Graphics Virtual Address into a Physical Address. In
 * addition to the normal collateral associated with any va->pa
 * translations, GEN hardware also has a portion of the GTT which can be
 * mapped by the CPU and remain both coherent and correct (in cases like
 * swizzling). That region is referred to as GMADR in the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
#ifdef __NetBSD__
	/*
	 * This is not actually the `Graphics Stolen Memory'; it is the
	 * graphics translation table, which we write to through the
	 * GTTADR/GTTMMADR PCI BAR, and which is backed by `Graphics
	 * GTT Stolen Memory'. That isn't the `Graphics Stolen Memory'
	 * either, although it is stolen from main memory.
	 */
	bus_space_tag_t gsmt;
	bus_space_handle_t gsmh;
	bus_size_t gsmsz;

	/* Maximum physical address that can be wired into a GTT entry. */
	uint64_t max_paddr;

	/* Page freelist for pages limited to the above maximum address. */
	int pgfl;
#else
	void __iomem *gsm;
#endif
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}
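
/*
 * Example: a 4 GiB address space has vm->total == 1ULL << 32, so
 * (vm->total - 1) >> 32 == 0 and three page-table levels suffice; a 48b
 * ppgtt with vm->total == 1ULL << 48 yields a nonzero result and uses
 * four levels.
 */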

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

void i915_vm_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline bool
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return false;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	if (atomic_dec_and_test(&vm->open))
		__i915_vm_close(vm);

	i915_vm_put(vm);
}
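
/*
 * Usage sketch for the helpers above: a context that may bind vmas into
 * the vm keeps it open --
 *
 *	vm = i915_vm_open(vm);	-- or i915_vm_tryopen() if it may be closed
 *	...
 *	i915_vm_close(vm);	-- drops both the open count and the kref
 *
 * while i915_vm_get()/i915_vm_put() manage only the reference count and
 * do not keep the vm open for new attachments.
 */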

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
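
/*
 * Worked example, with pde_shift == 21 (gen8) and thus 512 PTEs per table:
 * addr = 0x1ff000, length = 0x2000 spans the PTE at index 511 of the first
 * table and the PTE at index 0 of the next.  Since addr and end fall in
 * different 2M blocks, the helper returns 512 - 511 = 1, counting only the
 * PTEs up to the page table boundary.
 */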

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_dma *pt = ppgtt->pd->entry[n];

	return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);

void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);

u64 gen8_pte_encode(dma_addr_t addr,
		    enum i915_cache_level level,
		    u32 flags);

int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

void
fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {					\
	u64 v__ = lower_32_bits(v);				\
	fill_px((px), v__ << 32 | v__);				\
} while (0)
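
/*
 * Example: fill32_px() replicates a 32-bit PTE into both halves of each
 * u64, so fill32_px(pt, 0x12345678) writes 0x1234567812345678 to every
 * 64-bit slot of the page.
 */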

int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp);
void cleanup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(size_t sz);

void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd);

#define free_px(vm, px) free_pd(vm, px_base(px))

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_dma * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct i915_page_scratch * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct i915_page_scratch * const scratch);

void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

#ifdef __NetBSD__
struct sgt_dma {
	bus_dmamap_t map;
	unsigned seg;
	bus_size_t off;
};
static inline struct sgt_dma
sgt_dma(struct i915_vma *vma)
{
	return (struct sgt_dma) { vma->pages->sgl->sg_dmamap, 0, 0 };
}
#else
static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg->length };
}
#endif

#endif /* __INTEL_GTT_H__ */