/*	$NetBSD: gen8_ppgtt.c,v 1.1.1.1 2021/12/18 20:15:32 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gen8_ppgtt.c,v 1.1.1.1 2021/12/18 20:15:32 riastradh Exp $");

#include <linux/log2.h>

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

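/*
 * Encode a page-directory entry: the dma address of the next-level
 * table, the present and writable bits, and a PPAT index selecting
 * cached or uncached attributes for the walk.
 */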
static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
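/*
 * Worked example for a 48b address on a 4-level walk: each level
 * consumes ilog2(GEN8_PDES) == 9 bits of index, so
 *   __gen8_pte_index(addr, 0) == bits [20:12] -- PTE within a PT
 *   __gen8_pte_index(addr, 1) == bits [29:21] -- PDE within a PD
 *   __gen8_pte_index(addr, 2) == bits [38:30] -- PDPE
 *   __gen8_pte_index(addr, 3) == bits [47:39] -- PML4E
 */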

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

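/*
 * Return the number of entries of @pd (at level @lvl) spanned by
 * [start, end), given in page-frame units, and store the first entry
 * in *idx. @end is first rounded up so that a partially covered child
 * counts as a whole entry; if the range extends past this directory,
 * the count is clamped to GEN8_PDES - *idx and the caller iterates.
 */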
static inline unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

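/* Number of PTEs spanned by [start, end), clamped to a single page table. */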
static inline unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

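/*
 * e.g. a 4-level vm (top == 3) with vm->total == 1ull << 48 has
 * (1ull << 48) >> (12 + 3 * 9) == 512 PML4 entries, whereas a
 * 3-level vm (top == 2) with vm->total == 1ull << 32 has only the
 * 4 PDP entries of the legacy 32b layout.
 */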
static inline unsigned int
gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);
	return (vm->total + (1ull << shift) - 1) >> shift;
}

static inline struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static inline struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	free_px(vm, pd);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

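/*
 * Point the PTEs for [start, end) (in page-frame units) back at
 * scratch, recursing down from level @lvl and freeing any child table
 * whose last user goes away. Returns the updated start cursor so the
 * recursion can continue across directory boundaries.
 */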
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = kmap_atomic_px(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0].encode,
				 count);
			kunmap_atomic(vaddr);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 * const start, const u64 end, int lvl)
{
	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
	struct i915_page_table *alloc = NULL;
	unsigned int idx, len;
	int ret = 0;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = fetch_and_zero(&alloc);
			if (lvl) {
				if (!pt) {
					pt = &alloc_pd(vm)->pt;
					if (IS_ERR(pt)) {
						ret = PTR_ERR(pt);
						goto out;
					}
				}

				fill_px(pt, vm->scratch[lvl].encode);
			} else {
				if (!pt) {
					pt = alloc_pt(vm);
					if (IS_ERR(pt)) {
						ret = PTR_ERR(pt);
						goto out;
					}
				}

				if (intel_vgpu_active(vm->i915) ||
				    gen8_pt_count(*start, end) < I915_PDES)
					fill_px(pt, vm->scratch[lvl].encode);
			}

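			/*
			 * The lock was dropped for the allocation, so a
			 * concurrent walker may have populated this entry
			 * first; if so, stash our fresh table in @alloc
			 * for reuse on a later slot (or freeing at out:).
			 */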
			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx]))
				set_pd_entry(pd, idx, pt);
			else
				alloc = pt, pt = pd->entry[idx];
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
						 start, end, lvl);
			if (unlikely(ret)) {
				if (release_pd_entry(pd, idx, pt, scratch))
					free_px(vm, pt);
				goto out;
			}

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
out:
	if (alloc)
		free_px(vm, alloc);
	return ret;
}

static int gen8_ppgtt_alloc(struct i915_address_space *vm,
			    u64 start, u64 length)
{
	u64 from;
	int err;

	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);
	from = start;

	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
				 &start, start + length, vm->top);
	if (unlikely(err && from != start))
		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
				   from, start, vm->top);

	return err;
}

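/*
 * Walk a flat PTE index across the tree, kmapping one page table at a
 * time and reloading the PD/PT pointers whenever the index crosses a
 * 512-entry boundary. Returns 0 once the sg list is exhausted,
 * otherwise the index at which the caller should continue (next pdp).
 */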
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	kunmap_atomic(vaddr);

	return idx;
}

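/*
 * Insert using the largest page size each run allows: a 2M PDE when
 * the dma address, remaining length and GTT offset are all 2M-aligned,
 * otherwise 4K PTEs, opportunistically flagging a page table as 64K
 * (GEN8_PDE_IPS_64K) once it has been filled, or padded out, with
 * 64K-aligned entries.
 */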
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	u64 start = vma->node.start;
	dma_addr_t rem = iter->sg->length;

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = kmap_atomic_px(pt);
		}

		do {
			GEM_BUG_ON(iter->sg->length < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = iter->sg->length;
				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < I915_PDES);

		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled the whole page-table with 64K entries, or filled
		 * part of it and have reached the end of the sg table with
		 * enough padding.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0].encode;
				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				kunmap_atomic(vaddr);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg);
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes
	 * separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
		return 0;
	}

	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
	if (ret)
		return ret;

	vm->scratch[0].encode =
		gen8_pte_encode(px_dma(&vm->scratch[0]),
				I915_CACHE_LLC, vm->has_read_only);

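	/*
	 * Build the scratch hierarchy: each scratch level is filled with
	 * entries pointing at the scratch level below it, so that an
	 * unallocated walk at any depth terminates in the scratch page.
	 */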
	for (i = 1; i <= vm->top; i++) {
		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
			goto free_scratch;

		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
		vm->scratch[i].encode =
			gen8_pde_encode(px_dma(&vm->scratch[i]),
					I915_CACHE_LLC);
	}

	return 0;

free_scratch:
	free_scratch(vm);
	return -ENOMEM;
}

static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		fill_px(pde, vm->scratch[1].encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
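	/* Ensure the PDE writes are visible before the tables are used. */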
	wmb();

	return 0;
}

static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;

	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));

	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory, so 4 * 512 * 512 * 4096 = 4GB covers the
 * legacy 32b address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);

	/*
	 * There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter, so play safe for now.
	 */
	if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
		ppgtt->vm.pt_kmap_wc = true;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}