/*	$NetBSD: gen8_ppgtt.c,v 1.3 2021/12/19 01:24:25 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gen8_ppgtt.c,v 1.3 2021/12/19 01:24:25 riastradh Exp $");

#include <linux/log2.h>

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}
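
/*
 * For example, a cacheable page directory whose backing page sits at
 * dma address 0x100000 would encode (illustratively) as
 *
 *	0x100000 | _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED_PDE
 *
 * i.e. the address bits are reused verbatim; only the low 12 bits,
 * zero for any 4KiB-aligned page, carry the flags.
 */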

static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))

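/*
 * With 4KiB pages and 512 (GEN8_PDES) eight-byte entries per level, a
 * GPU virtual address decomposes through the macros above as:
 *
 *	bits 12..20  __gen8_pte_index(addr, 0)  PTE within the page table
 *	bits 21..29  __gen8_pte_index(addr, 1)  PDE within the directory
 *	bits 30..38  __gen8_pte_index(addr, 2)  PDPE within the pdp
 *	bits 39..47  __gen8_pte_index(addr, 3)  PML4E (4-level vm only)
 *
 * The gen8_pd_* variants take an index that has already been shifted
 * down by GEN8_PTE_SHIFT, i.e. a page number rather than an address.
 */
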
#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

static inline unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}
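
/*
 * Worked example: start and end are in page units here (callers shift
 * by GEN8_PTE_SHIFT first).  For lvl == 1, [start, end) == [0, 513)
 * rounds end up to 1024, giving *idx == 0 and len == 2: two page
 * tables are touched, the second only partially.  When start and end
 * fall under different parent entries, the count is clamped to the
 * remainder of this directory, GEN8_PDES - *idx.
 */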

static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

static inline unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}
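
/*
 * E.g. gen8_pt_count(4, 7) == 3, entirely within one page table,
 * whereas gen8_pt_count(510, 1030) == 2: only the two slots left in
 * the current table are counted and the caller moves on to the next.
 */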

static inline unsigned int
gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);
	return (vm->total + (1ull << shift) - 1) >> shift;
}
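
/*
 * Illustratively: a 4-level vm (top == 3) with total == 1ull << 48 has
 * 2^48 >> 39 == 512 top-level entries, while a 3-level vm (top == 2)
 * with total == 1ull << 32 has 2^32 >> 30 == 4 == GEN8_3LVL_PDPES.
 */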

static inline struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static inline struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, pd);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = kmap_atomic_px(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0].encode,
				 count);
			kunmap_atomic(vaddr);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 * const start, const u64 end, int lvl)
{
	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
	struct i915_page_table *alloc = NULL;
	unsigned int idx, len;
	int ret = 0;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = fetch_and_zero(&alloc);
			if (lvl) {
				if (!pt) {
					pt = &alloc_pd(vm)->pt;
					if (IS_ERR(pt)) {
						ret = PTR_ERR(pt);
						goto out;
					}
				}

				fill_px(pt, vm->scratch[lvl].encode);
			} else {
				if (!pt) {
					pt = alloc_pt(vm);
					if (IS_ERR(pt)) {
						ret = PTR_ERR(pt);
						goto out;
					}
				}

				if (intel_vgpu_active(vm->i915) ||
				    gen8_pt_count(*start, end) < I915_PDES)
					fill_px(pt, vm->scratch[lvl].encode);
			}

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx]))
				set_pd_entry(pd, idx, pt);
			else
				alloc = pt, pt = pd->entry[idx];
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
						 start, end, lvl);
			if (unlikely(ret)) {
				if (release_pd_entry(pd, idx, pt, scratch))
					free_px(vm, pt);
				goto out;
			}

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
out:
	if (alloc)
		free_px(vm, alloc);
	return ret;
}
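
/*
 * Note on the locking above: pd->lock is dropped around the actual
 * allocation, so a concurrent caller may install an entry at the same
 * idx first.  In that case the freshly allocated table is parked in
 * `alloc' and reused for the next empty slot, or freed at `out' if it
 * is never needed.
 */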

static int gen8_ppgtt_alloc(struct i915_address_space *vm,
			    u64 start, u64 length)
{
	u64 from;
	int err;

	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);
	from = start;

	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
				 &start, start + length, vm->top);
	if (unlikely(err && from != start))
		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
				   from, start, vm->top);

	return err;
}

static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
#ifdef __NetBSD__
		KASSERT(iter->seg < iter->map->dm_nsegs);
		KASSERT((iter->off & (I915_GTT_PAGE_SIZE - 1)) == 0);
		const bus_dma_segment_t *seg = &iter->map->dm_segs[iter->seg];
		KASSERT((seg->ds_addr & (I915_GTT_PAGE_SIZE - 1)) == 0);
		KASSERT((seg->ds_len & (I915_GTT_PAGE_SIZE - 1)) == 0);
		KASSERT(iter->off <= seg->ds_len - I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode |
		    (seg->ds_addr + iter->off);
		iter->off += I915_GTT_PAGE_SIZE;
		if (iter->off >= seg->ds_len) {
			GEM_BUG_ON(iter->off > seg->ds_len);
			iter->off = 0;
			if (++iter->seg >= iter->map->dm_nsegs) {
				GEM_BUG_ON(iter->seg > iter->map->dm_nsegs);
				/* Out of dma segments: signal the caller. */
				idx = 0;
				break;
			}
		}
#else
		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}
#endif

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	kunmap_atomic(vaddr);

	return idx;
}
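
/*
 * Above, idx is a linearized page number: bits 0..8 select the PTE,
 * bits 9..17 the page table within the directory, and bits 18..26 the
 * directory within the pdp.  The kmap is refreshed whenever ++idx
 * carries out of the low 9 bits, i.e. when the walk crosses into the
 * next page table.
 */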

static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	u64 start = vma->node.start;
#ifdef __NetBSD__
	bus_size_t rem = iter->map->dm_segs[iter->seg].ds_len - iter->off;
#else
	dma_addr_t rem = iter->sg->length;
#endif

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
#ifdef __NetBSD__
		    IS_ALIGNED((iter->map->dm_segs[iter->seg].ds_addr +
			    iter->off),
			I915_GTT_PAGE_SIZE_2M) &&
#else
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
#endif
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
#ifdef __NetBSD__
			    IS_ALIGNED((iter->map->dm_segs[iter->seg].ds_addr
				    + iter->off),
				I915_GTT_PAGE_SIZE_64K) &&
#else
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
#endif
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = kmap_atomic_px(pt);
		}

		do {
#ifdef __NetBSD__
			GEM_BUG_ON((iter->map->dm_segs[iter->seg].ds_len -
				iter->off) < page_size);
			vaddr[index++] = encode |
			    (iter->map->dm_segs[iter->seg].ds_addr +
				iter->off);
#else
			GEM_BUG_ON(iter->sg->length < page_size);
			vaddr[index++] = encode | iter->dma;
#endif

			start += page_size;
#ifdef __NetBSD__
			iter->off += page_size;
			rem -= page_size;
			if (iter->off >= iter->map->dm_segs[iter->seg].ds_len) {
				GEM_BUG_ON(iter->off >
				    iter->map->dm_segs[iter->seg].ds_len);
				iter->off = 0;
				if (++iter->seg >= iter->map->dm_nsegs) {
					GEM_BUG_ON(iter->seg >
					    iter->map->dm_nsegs);
					break;
				}
				const bus_dma_segment_t *seg =
				    &iter->map->dm_segs[iter->seg];
				rem = seg->ds_len;
				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED((seg->ds_addr + iter->off),
					    I915_GTT_PAGE_SIZE_64K) &&
					(IS_ALIGNED(rem,
					    I915_GTT_PAGE_SIZE_64K) ||
					    rem >= ((I915_PDES - index) *
						I915_GTT_PAGE_SIZE))))
					maybe_64K = -1;
				if (unlikely(!IS_ALIGNED((seg->ds_addr +
						iter->off), page_size)))
					break;
			}
#else
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = iter->sg->length;
				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
#endif
		} while (rem >= page_size && index < I915_PDES);

		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled whole page-table with 64K entries, or filled part of
		 * it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
#ifdef __NetBSD__
		      iter->seg == iter->map->dm_nsegs &&
#else
		      !iter->sg &&
#endif
		      IS_ALIGNED(vma->node.start + vma->node.size,
				 I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0].encode;
				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				kunmap_atomic(vaddr);
			}
		}

		vma->page_sizes.gtt |= page_size;
	}
#ifdef __NetBSD__
	while (iter->seg < iter->map->dm_nsegs);
#else
	while (iter->sg);
#endif
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
		return 0;
	}

	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
	if (ret)
		return ret;

	vm->scratch[0].encode =
		gen8_pte_encode(px_dma(&vm->scratch[0]),
				I915_CACHE_LLC, vm->has_read_only);

	for (i = 1; i <= vm->top; i++) {
		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
			goto free_scratch;

		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
		vm->scratch[i].encode =
			gen8_pde_encode(px_dma(&vm->scratch[i]),
					I915_CACHE_LLC);
	}

	return 0;

free_scratch:
	free_scratch(vm);
	return -ENOMEM;
}
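
/*
 * The scratch hierarchy built above is chained: scratch[0] is the data
 * page, and each scratch[i] is a directory page whose entries all
 * point at scratch[i - 1].  A translation that reaches a scratch entry
 * at any level therefore walks down through scratch tables and lands
 * on the scratch page, giving unpopulated ranges a well-defined
 * backing instead of wandering into random memory.
 */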

static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		fill_px(pde, vm->scratch[1].encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;

	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));

	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
 * registers with a net effect resembling a 2-level page table in normal x86
 * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
 * legacy 32b address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);

	/*
	 * There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter, so play safe for now.
	 */
	if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
		ppgtt->vm.pt_kmap_wc = true;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}
    808