/*	$NetBSD: gen8_ppgtt.c,v 1.7 2021/12/19 12:07:47 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gen8_ppgtt.c,v 1.7 2021/12/19 12:07:47 riastradh Exp $");

#include <linux/log2.h>

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

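/*
 * Encode a page-directory entry: the DMA address of the next-level table,
 * marked present and writable, with the PPAT attribute selected by the
 * cache level (PPAT_CACHED_PDE for anything cached, PPAT_UNCACHED
 * otherwise).
 */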
static u64 gen8_pde_encode(const dma_addr_t addr,
			   const enum i915_cache_level level)
{
	u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;

	return pde;
}

static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
	enum vgt_g2v_type msg;
	int i;

	if (create)
		atomic_inc(px_used(ppgtt->pd)); /* never remove */
	else
		atomic_dec(px_used(ppgtt->pd));

	mutex_lock(&i915->vgpu.lock);

	if (i915_vm_is_4lvl(&ppgtt->vm)) {
		const u64 daddr = px_dma(ppgtt->pd);

		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		intel_uncore_write(uncore,
				   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = create ?
			VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].lo),
					   lower_32_bits(daddr));
			intel_uncore_write(uncore,
					   vgtif_reg(pdp[i].hi),
					   upper_32_bits(daddr));
		}

		msg = create ?
			VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
			VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
	}

	/* g2v_notify atomically (via hv trap) consumes the message packet. */
	intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

	mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))

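/*
 * With 4 KiB pages and 512 (GEN8_PDES) 8-byte entries per table, the levels
 * decompose an address as __gen8_pte_shift(0) == 12 (PTE), (1) == 21 (PDE),
 * (2) == 30 (PDPE) and (3) == 39 (PML4E).  __gen8_pte_index() extracts the
 * 9-bit index for a level from a byte address, while gen8_pd_index() does
 * the same for an index already expressed in page-frame numbers (i.e. an
 * address shifted right by GEN8_PTE_SHIFT).
 */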
#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

static inline unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
	const int shift = gen8_pd_shift(lvl);
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	end += ~mask >> gen8_pd_shift(1);

	*idx = i915_pde_index(start, shift);
	if ((start ^ end) & mask)
		return GEN8_PDES - *idx;
	else
		return i915_pde_index(end, shift) - *idx;
}

static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
	const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

	GEM_BUG_ON(start >= end);
	return (start ^ end) & mask && (start & ~mask) == 0;
}

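/*
 * Number of level-0 PTEs covered by [start, end) without crossing the
 * 512-entry page table that contains start; callers iterate, so ranges
 * spanning several page tables are handled one table at a time.
 */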
static inline unsigned int gen8_pt_count(u64 start, u64 end)
{
	GEM_BUG_ON(start >= end);
	if ((start ^ end) >> gen8_pd_shift(1))
		return GEN8_PDES - (start & (GEN8_PDES - 1));
	else
		return end - start;
}

static inline unsigned int
gen8_pd_top_count(const struct i915_address_space *vm)
{
	unsigned int shift = __gen8_pte_shift(vm->top);
	return (vm->total + (1ull << shift) - 1) >> shift;
}

static inline struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

	if (vm->top == 2)
		return ppgtt->pd;
	else
		return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static inline struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
	return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

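/*
 * Recursively free a directory and every child table below it, skipping
 * empty slots.  count is GEN8_PDES for interior directories but may be
 * smaller for the top level, which can cover less than a full 512 entries.
 */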
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
				 struct i915_page_directory *pd,
				 int count, int lvl)
{
	if (lvl) {
		void **pde = pd->entry;

		do {
			if (!*pde)
				continue;

			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
		} while (pde++, --count);
	}

	spin_lock_destroy(&pd->lock);
	free_px(vm, pd);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->i915))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	__gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
	free_scratch(vm);
}

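/*
 * Recursive clear.  For each entry of pd intersecting [start, end) (both in
 * page-frame numbers, not bytes): a fully-populated child wholly contained
 * in the range is unplugged and torn down outright; otherwise the walk
 * descends, and at level 0 the affected PTEs are rewritten with the scratch
 * encoding.  Entries whose use count drops to zero are released via
 * release_pd_entry().
 */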
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 start, const u64 end, int lvl)
{
	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
	unsigned int idx, len;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
		    gen8_pd_contains(start, end, lvl)) {
			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
			    __func__, vm, lvl + 1, idx, start, end);
			clear_pd_entry(pd, idx, scratch);
			__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
			start += (u64)I915_PDES << gen8_pd_shift(lvl);
			continue;
		}

		if (lvl) {
			start = __gen8_ppgtt_clear(vm, as_pd(pt),
						   start, end, lvl);
		} else {
			unsigned int count;
			u64 *vaddr;

			count = gen8_pt_count(start, end);
			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
			    __func__, vm, lvl, start, end,
			    gen8_pd_index(start, 0), count,
			    atomic_read(&pt->used));
			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

			vaddr = kmap_atomic_px(pt);
			memset64(vaddr + gen8_pd_index(start, 0),
				 vm->scratch[0].encode,
				 count);
			kunmap_atomic(vaddr);

			atomic_sub(count, &pt->used);
			start += count;
		}

		if (release_pd_entry(pd, idx, pt, scratch))
			free_px(vm, pt);
	} while (idx++, --len);

	return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
			     u64 start, u64 length)
{
	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);

	__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
			   start, start + length, vm->top);
}

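/*
 * Recursive allocation.  pd->lock is dropped around the actual page
 * allocations; on re-acquire the entry is re-checked, and if another thread
 * won the race the freshly allocated table is parked in 'alloc' for reuse
 * on the next miss (or freed on exit).  Newly plugged tables are pre-filled
 * with the appropriate scratch encoding so the hardware never walks into an
 * uninitialised entry.
 */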
static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
			      struct i915_page_directory * const pd,
			      u64 * const start, const u64 end, int lvl)
{
	const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
	struct i915_page_table *alloc = NULL;
	unsigned int idx, len;
	int ret = 0;

	GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

	len = gen8_pd_range(*start, end, lvl--, &idx);
	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
	    __func__, vm, lvl + 1, *start, end,
	    idx, len, atomic_read(px_used(pd)));
	GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

	spin_lock(&pd->lock);
	GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
	do {
		struct i915_page_table *pt = pd->entry[idx];

		if (!pt) {
			spin_unlock(&pd->lock);

			DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
			    __func__, vm, lvl + 1, idx);

			pt = fetch_and_zero(&alloc);
			if (lvl) {
				if (!pt) {
					pt = &alloc_pd(vm)->pt;
					if (IS_ERR(pt)) {
						ret = PTR_ERR(pt);
						goto out;
					}
				}

				fill_px(pt, vm->scratch[lvl].encode);
			} else {
				if (!pt) {
					pt = alloc_pt(vm);
					if (IS_ERR(pt)) {
						ret = PTR_ERR(pt);
						goto out;
					}
				}

				if (intel_vgpu_active(vm->i915) ||
				    gen8_pt_count(*start, end) < I915_PDES)
					fill_px(pt, vm->scratch[lvl].encode);
			}

			spin_lock(&pd->lock);
			if (likely(!pd->entry[idx]))
				set_pd_entry(pd, idx, pt);
			else
				alloc = pt, pt = pd->entry[idx];
		}

		if (lvl) {
			atomic_inc(&pt->used);
			spin_unlock(&pd->lock);

			ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
						 start, end, lvl);
			if (unlikely(ret)) {
				if (release_pd_entry(pd, idx, pt, scratch))
					free_px(vm, pt);
				goto out;
			}

			spin_lock(&pd->lock);
			atomic_dec(&pt->used);
			GEM_BUG_ON(!atomic_read(&pt->used));
		} else {
			unsigned int count = gen8_pt_count(*start, end);

			DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
			    __func__, vm, lvl, *start, end,
			    gen8_pd_index(*start, 0), count,
			    atomic_read(&pt->used));

			atomic_add(count, &pt->used);
			/* All other pdes may be simultaneously removed */
			GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
			*start += count;
		}
	} while (idx++, --len);
	spin_unlock(&pd->lock);
out:
	if (alloc) {
		if (lvl) {
			struct i915_page_directory *allocpd =
				container_of(alloc, struct i915_page_directory,
					     pt);
			spin_lock_destroy(&allocpd->lock);
		}
		free_px(vm, alloc);
	}
	return ret;
}

static int gen8_ppgtt_alloc(struct i915_address_space *vm,
			    u64 start, u64 length)
{
	u64 from;
	int err;

	GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
	GEM_BUG_ON(range_overflows(start, length, vm->total));

	start >>= GEN8_PTE_SHIFT;
	length >>= GEN8_PTE_SHIFT;
	GEM_BUG_ON(length == 0);
	from = start;

	err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
				 &start, start + length, vm->top);
	if (unlikely(err && from != start))
		__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
				   from, start, vm->top);

	return err;
}

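/*
 * Write PTEs for a run of pages starting at index idx, walking the backing
 * store (a bus_dma segment list on NetBSD, an sg list otherwise) one 4 KiB
 * page at a time.  The kmap is refreshed whenever the walk crosses into a
 * new page table, and the updated index (or 0 once the backing store is
 * exhausted) is returned so the caller can continue in the next PDP.
 */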
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
		      struct i915_page_directory *pdp,
		      struct sgt_dma *iter,
		      u64 idx,
		      enum i915_cache_level cache_level,
		      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;

	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
	do {
#ifdef __NetBSD__
		KASSERT(iter->seg < iter->map->dm_nsegs);
		KASSERT((iter->off & (I915_GTT_PAGE_SIZE - 1)) == 0);
		const bus_dma_segment_t *seg = &iter->map->dm_segs[iter->seg];
		KASSERT((seg->ds_addr & (I915_GTT_PAGE_SIZE - 1)) == 0);
		KASSERT((seg->ds_len & (I915_GTT_PAGE_SIZE - 1)) == 0);
		KASSERT(iter->off <= seg->ds_len - I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] =
		    pte_encode | (seg->ds_addr + iter->off);
		iter->off += I915_GTT_PAGE_SIZE;
		if (iter->off >= seg->ds_len) {
			GEM_BUG_ON(iter->off > seg->ds_len);
			iter->off = 0;
			if (++iter->seg >= iter->map->dm_nsegs) {
				GEM_BUG_ON(iter->seg > iter->map->dm_nsegs);
				idx = 0;
				break;
			}
		}
#else
		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				idx = 0;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}
#endif

		if (gen8_pd_index(++idx, 0) == 0) {
			if (gen8_pd_index(idx, 1) == 0) {
				/* Limited by sg length for 3lvl */
				if (gen8_pd_index(idx, 2) == 0)
					break;

				pd = pdp->entry[gen8_pd_index(idx, 2)];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
		}
	} while (1);
	kunmap_atomic(vaddr);

	return idx;
}

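/*
 * Insert entries for a vma whose backing store allows larger GTT page
 * sizes.  Each pass fills at most one page directory with 2 MiB PDEs (when
 * the virtual address and DMA address are 2 MiB aligned and at least 2 MiB
 * remain in the current segment) or one page table with 4 KiB PTEs, and a
 * page table in which every entry qualifies is flagged as 64 KiB via
 * GEN8_PDE_IPS_64K afterwards.
 */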
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
				   struct sgt_dma *iter,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	u64 start = vma->node.start;
#ifdef __NetBSD__
	bus_size_t rem = iter->map->dm_segs[iter->seg].ds_len - iter->off;
#else
	dma_addr_t rem = iter->sg->length;
#endif

	GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

	do {
		struct i915_page_directory * const pdp =
			gen8_pdp_for_page_address(vma->vm, start);
		struct i915_page_directory * const pd =
			i915_pd_entry(pdp, __gen8_pte_index(start, 2));
		gen8_pte_t encode = pte_encode;
		unsigned int maybe_64K = -1;
		unsigned int page_size;
		gen8_pte_t *vaddr;
		u16 index;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
#ifdef __NetBSD__
		    IS_ALIGNED((iter->map->dm_segs[iter->seg].ds_addr +
			    iter->off),
			I915_GTT_PAGE_SIZE_2M) &&
#else
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
#endif
		    rem >= I915_GTT_PAGE_SIZE_2M &&
		    !__gen8_pte_index(start, 0)) {
			index = __gen8_pte_index(start, 1);
			encode |= GEN8_PDE_PS_2M;
			page_size = I915_GTT_PAGE_SIZE_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt =
				i915_pt_entry(pd, __gen8_pte_index(start, 1));

			index = __gen8_pte_index(start, 0);
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
#ifdef __NetBSD__
			    IS_ALIGNED((iter->map->dm_segs[iter->seg].ds_addr
				    + iter->off),
				I915_GTT_PAGE_SIZE_64K) &&
#else
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
#endif
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = __gen8_pte_index(start, 1);

			vaddr = kmap_atomic_px(pt);
		}

		do {
#ifdef __NetBSD__
			GEM_BUG_ON((iter->map->dm_segs[iter->seg].ds_len -
				iter->off) < page_size);
			vaddr[index++] = encode |
			    (iter->map->dm_segs[iter->seg].ds_addr
				+ iter->off);
#else
			GEM_BUG_ON(iter->sg->length < page_size);
			vaddr[index++] = encode | iter->dma;
#endif

			start += page_size;
#ifdef __NetBSD__
			iter->off += page_size;
			rem -= page_size;
			if (iter->off >= iter->map->dm_segs[iter->seg].ds_len) {
				GEM_BUG_ON(iter->off >
				    iter->map->dm_segs[iter->seg].ds_len);
				iter->off = 0;
				if (++iter->seg >= iter->map->dm_nsegs) {
					GEM_BUG_ON(iter->seg >
					    iter->map->dm_nsegs);
					break;
				}
				const bus_dma_segment_t *seg =
				    &iter->map->dm_segs[iter->seg];
				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED((seg->ds_addr + iter->off),
					    I915_GTT_PAGE_SIZE_64K) &&
					(IS_ALIGNED(rem,
					    I915_GTT_PAGE_SIZE_64K) ||
					    rem >= ((I915_PDES - index) * I915_GTT_PAGE_SIZE))))
					maybe_64K = -1;
				if (unlikely(!IS_ALIGNED((seg->ds_addr +
					    iter->off), page_size)))
					break;
			}
#else
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = iter->sg->length;
				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K != -1 && index < I915_PDES &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = -1;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
#endif
		} while (rem >= page_size && index < I915_PDES);

		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled the whole page-table with 64K entries, or we have
		 * filled part of it and reached the end of the sg table with
		 * enough padding left over.
		 */
		if (maybe_64K != -1 &&
		    (index == I915_PDES ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
#ifdef __NetBSD__
		      iter->seg == iter->map->dm_nsegs &&
#else
		      !iter->sg &&
#endif
		      IS_ALIGNED(vma->node.start +
				 vma->node.size,
				 I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = vma->vm->scratch[0].encode;
				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				kunmap_atomic(vaddr);
			}
		}

		vma->page_sizes.gtt |= page_size;
	}
#ifdef __NetBSD__
	while (iter->seg < iter->map->dm_nsegs);
#else
	while (iter->sg);
#endif
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
			      struct i915_vma *vma,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
	} else {
		u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

		do {
			struct i915_page_directory * const pdp =
				gen8_pdp_for_page_index(vm, idx);

			idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
						    cache_level, flags);
		} while (idx);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

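/*
 * Build the scratch hierarchy: scratch[0] is the data page every unmapped
 * PTE points at, and each higher-level scratch page is filled with the
 * encoding of the level below, so a stray access anywhere in an unpopulated
 * range lands in the scratch page.  When read-only pages are supported, the
 * GT-wide scratch tree is reused instead of building a private one.
 */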
static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;
	int i;

	/*
	 * If everybody agrees not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
		struct i915_address_space *clone = vm->gt->vm;

		GEM_BUG_ON(!clone->has_read_only);

		vm->scratch_order = clone->scratch_order;
		memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
		px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
		return 0;
	}

	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
	if (ret)
		return ret;

	vm->scratch[0].encode =
		gen8_pte_encode(px_dma(&vm->scratch[0]),
				I915_CACHE_LLC, vm->has_read_only);

	for (i = 1; i <= vm->top; i++) {
		if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
			goto free_scratch;

		fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
		vm->scratch[i].encode =
			gen8_pde_encode(px_dma(&vm->scratch[i]),
					I915_CACHE_LLC);
	}

	return 0;

free_scratch:
	free_scratch(vm);
	return -ENOMEM;
}

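/*
 * For the 3-level (legacy 32b) layout the four top-level page directories
 * are loaded directly through the PDP registers, so they are allocated up
 * front, filled with scratch and pinned for the lifetime of the ppgtt.
 */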
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct i915_page_directory *pd = ppgtt->pd;
	unsigned int idx;

	GEM_BUG_ON(vm->top != 2);
	GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

	for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
		struct i915_page_directory *pde;

		pde = alloc_pd(vm);
		if (IS_ERR(pde))
			return PTR_ERR(pde);

		fill_px(pde, vm->scratch[1].encode);
		set_pd_entry(pd, idx, pde);
		atomic_inc(px_used(pde)); /* keep pinned */
	}
	wmb();

	return 0;
}

static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
	const unsigned int count = gen8_pd_top_count(vm);
	struct i915_page_directory *pd;

	GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));

	pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_page_dma(vm, px_base(pd)))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
	atomic_inc(px_used(pd)); /* mark as pinned */
	return pd;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ppgtt_init(ppgtt, gt);
	ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;

	/*
	 * From bdw, there is hw support for read-only pages in the PPGTT.
	 *
	 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
	 * for now.
	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);

	/*
	 * There are only a few exceptions for gen >= 6: chv and bxt.
	 * And since we are not sure about the latter, play it safe for now.
	 */
	if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
		ppgtt->vm.pt_kmap_wc = true;

	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;

	ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
	if (IS_ERR(ppgtt->pd)) {
		err = PTR_ERR(ppgtt->pd);
		goto err_free_scratch;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		err = gen8_preallocate_top_level_pdp(ppgtt);
		if (err)
			goto err_free_pd;
	}

	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
	ppgtt->vm.clear_range = gen8_ppgtt_clear;

	if (intel_vgpu_active(gt->i915))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

	return ppgtt;

err_free_pd:
	__gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
			     gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
	free_scratch(&ppgtt->vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}