/*	$NetBSD: intel_gtt.c,v 1.7 2021/12/19 11:45:01 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_gtt.c,v 1.7 2021/12/19 11:45:01 riastradh Exp $");

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>

#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"

#include <linux/nbsd-namespace.h>

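/*
 * The pagestash code below caches write-combining pages for page-table
 * allocations and is used only by the Linux build.  The NetBSD port skips
 * it entirely: i915_address_space_init() records the bus_dma tag instead,
 * and __setup_page_dma() allocates page-table pages with bus_dma(9).
 */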
#ifndef __NetBSD__
void stash_init(struct pagestash *stash)
{
	pagevec_init(&stash->pvec);
	spin_lock_init(&stash->lock);
}

static struct page *stash_pop_page(struct pagestash *stash)
{
	struct page *page = NULL;

	spin_lock(&stash->lock);
	if (likely(stash->pvec.nr))
		page = stash->pvec.pages[--stash->pvec.nr];
	spin_unlock(&stash->lock);

	return page;
}

static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
{
	unsigned int nr;

	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);

	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
	memcpy(stash->pvec.pages + stash->pvec.nr,
	       pvec->pages + pvec->nr - nr,
	       sizeof(pvec->pages[0]) * nr);
	stash->pvec.nr += nr;

	spin_unlock(&stash->lock);

	pvec->nr -= nr;
}

static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct pagevec stack;
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	page = stash_pop_page(&vm->free_pages);
	if (page)
		return page;

	if (!vm->pt_kmap_wc)
		return alloc_page(gfp);

	/* Look in our global stash of WC pages... */
	page = stash_pop_page(&vm->i915->mm.wc_stash);
	if (page)
		return page;

	/*
	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
	 *
	 * We have to be careful as page allocation may trigger the shrinker
	 * (via direct reclaim) which will fill up the WC stash underneath us.
	 * So we add our WB pages into a temporary pvec on the stack and merge
	 * them into the WC stash after all the allocations are complete.
	 */
	pagevec_init(&stack);
	do {
		struct page *page;

		page = alloc_page(gfp);
		if (unlikely(!page))
			break;

		stack.pages[stack.nr++] = page;
	} while (pagevec_space(&stack));

	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
		page = stack.pages[--stack.nr];

		/* Merge spare WC pages to the global stash */
		if (stack.nr)
			stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);

		/* Push any surplus WC pages onto the local VM stash */
		if (stack.nr)
			stash_push_pagevec(&vm->free_pages, &stack);
	}

	/* Return unwanted leftovers */
	if (unlikely(stack.nr)) {
		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
		__pagevec_release(&stack);
	}

	return page;
}

static void vm_free_pages_release(struct i915_address_space *vm,
				  bool immediate)
{
	struct pagevec *pvec = &vm->free_pages.pvec;
	struct pagevec stack;

	lockdep_assert_held(&vm->free_pages.lock);
	GEM_BUG_ON(!pagevec_count(pvec));

	if (vm->pt_kmap_wc) {
		/*
		 * When we use WC, first fill up the global stash and then
		 * only if full immediately free the overflow.
		 */
		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);

		/*
		 * As we have made some room in the VM's free_pages,
		 * we can wait for it to fill again. Unless we are
		 * inside i915_address_space_fini() and must
		 * immediately release the pages!
		 */
		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
			return;

		/*
		 * We have to drop the lock to allow ourselves to sleep,
		 * so take a copy of the pvec and clear the stash for
		 * others to use it as we sleep.
		 */
		stack = *pvec;
		pagevec_reinit(pvec);
		spin_unlock(&vm->free_pages.lock);

		pvec = &stack;
		set_pages_array_wb(pvec->pages, pvec->nr);

		spin_lock(&vm->free_pages.lock);
	}

	__pagevec_release(pvec);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	/*
	 * On !llc, we need to change the pages back to WB. We only do so
	 * in bulk, so we rarely need to change the page attributes here,
	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
	 * To make detection of the possible sleep more likely, use an
	 * unconditional might_sleep() for everybody.
	 */
	might_sleep();
	spin_lock(&vm->free_pages.lock);
	while (!pagevec_space(&vm->free_pages.pvec))
		vm_free_pages_release(vm, false);
	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
	pagevec_add(&vm->free_pages.pvec, page);
	spin_unlock(&vm->free_pages.lock);
}
#endif

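/*
 * Flush every vma still bound in this address space as it is closed: each
 * vma's object is kept alive with kref_get_unless_zero() while we clear
 * its pin count, unbind it and drop our references.
 */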
void __i915_vm_close(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;

	mutex_lock(&vm->mutex);
	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
	}
	GEM_BUG_ON(!list_empty(&vm->bound_list));
	mutex_unlock(&vm->mutex);
}

void i915_address_space_fini(struct i915_address_space *vm)
{
#ifndef __NetBSD__
	spin_lock(&vm->free_pages.lock);
	if (pagevec_count(&vm->free_pages.pvec))
		vm_free_pages_release(vm, true);
	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
	spin_unlock(&vm->free_pages.lock);
#endif

	drm_mm_takedown(&vm->mm);

	mutex_destroy(&vm->mutex);
}

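/*
 * Final teardown is deferred: i915_vm_release() queues vm->rcu on the last
 * reference drop, and this worker then runs the vm's cleanup hook,
 * finalizes the address space and frees it.
 */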
static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, rcu.work);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	kfree(vm);
}

void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_rcu_work(vm->i915->wq, &vm->rcu);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);
	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
	atomic_set(&vm->open, 1);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);
	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

#ifdef __NetBSD__
	vm->dmat = vm->i915->drm.dmat;
#else
	stash_init(&vm->free_pages);
#endif

	INIT_LIST_HEAD(&vm->bound_list);
}

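/*
 * Drop the vma's page description.  On Linux a rotated/remapped view owns
 * a private sg_table which is freed here; the NetBSD port asserts instead
 * that vma->pages aliases the object's own pages (rotated views are not
 * handled yet, per the XXX below).
 */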
void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

#ifdef __NetBSD__		/* XXX rotate pages */
	GEM_BUG_ON(vma->pages != vma->obj->mm.pages);
#else
	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
#endif
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

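/*
 * Allocate and map a single page of DMA-safe memory for a page table.
 * The NetBSD path uses bus_dma(9) directly (allocate a segment, create a
 * map, load the segment); the Linux path takes a page from the stash and
 * maps it with dma_map_page_attrs().  Error unwinding in the NetBSD path
 * jumps backwards through labels placed in the earlier failure branches.
 */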
static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
#ifdef __NetBSD__
	int busdmaflags = 0;
	int error;
	int nseg = 1;

	if (gfp & __GFP_WAIT)
		busdmaflags |= BUS_DMA_WAITOK;
	else
		busdmaflags |= BUS_DMA_NOWAIT;

	error = bus_dmamem_alloc(vm->dmat, PAGE_SIZE, PAGE_SIZE, 0, &p->seg,
	    nseg, &nseg, busdmaflags);
	if (error) {
fail0:		p->map = NULL;
		return -error;	/* XXX errno NetBSD->Linux */
	}
	KASSERT(nseg == 1);
	error = bus_dmamap_create(vm->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    busdmaflags, &p->map);
	if (error) {
fail1:		bus_dmamem_free(vm->dmat, &p->seg, 1);
		goto fail0;
	}
	error = bus_dmamap_load_raw(vm->dmat, p->map, &p->seg, 1, PAGE_SIZE,
	    busdmaflags);
	if (error) {
fail2: __unused
		bus_dmamap_destroy(vm->dmat, p->map);
		goto fail1;
	}

	p->page = container_of(PHYS_TO_VM_PAGE(p->seg.ds_addr), struct page,
	    p_vmp);

	if (gfp & __GFP_ZERO) {
		void *va = kmap_atomic(p->page);
		memset(va, 0, PAGE_SIZE);
		kunmap_atomic(va);
	}
#else
	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page_attrs(vm->dma,
				      p->page, 0, PAGE_SIZE,
				      PCI_DMA_BIDIRECTIONAL,
				      DMA_ATTR_SKIP_CPU_SYNC |
				      DMA_ATTR_NO_WARN);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}
#endif

	return 0;
}

int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}

void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
{
#ifdef __NetBSD__
	bus_dmamap_unload(vm->dmat, p->map);
	bus_dmamap_destroy(vm->dmat, p->map);
	bus_dmamem_free(vm->dmat, &p->seg, 1);
#else
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
#endif
}

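/*
 * Fill the first `count' 64-bit entries of a page-table page with `val',
 * via a temporary atomic kernel mapping.
 */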
void
fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
{
	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
}

int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64k scratch page
	 * for all.
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
		size = I915_GTT_PAGE_SIZE_64K;
		gfp |= __GFP_NOWARN;
	}
	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;

	do {
		unsigned int order = get_order(size);
#ifdef __NetBSD__
		struct vm_page *vm_page;
		void *kva;
		int nseg;
		int ret;

		/* Allocate a scratch page. */
		/* XXX errno NetBSD->Linux */
		ret = -bus_dmamem_alloc(vm->dmat, size, size, 0,
		    &vm->scratch[0].base.seg, 1, &nseg, BUS_DMA_NOWAIT);
		if (ret)
			goto skip;
		KASSERT(nseg == 1);
		KASSERT(vm->scratch[0].base.seg.ds_len == size);

		/* Create a DMA map. */
		ret = -bus_dmamap_create(vm->dmat, size, 1, size, 0,
		    BUS_DMA_NOWAIT, &vm->scratch[0].base.map);
		if (ret)
			goto free_dmamem;

		/* Load the segment into the DMA map. */
		ret = -bus_dmamap_load_raw(vm->dmat, vm->scratch[0].base.map,
		    &vm->scratch[0].base.seg, 1, size, BUS_DMA_NOWAIT);
		if (ret)
			goto destroy_dmamap;
		KASSERT(vm->scratch[0].base.map->dm_nsegs == 1);
		KASSERT(vm->scratch[0].base.map->dm_segs[0].ds_len == size);

		/* Zero the page. */
		ret = -bus_dmamem_map(vm->dmat, &vm->scratch[0].base.seg, 1,
		    size, &kva, BUS_DMA_NOWAIT|BUS_DMA_NOCACHE);
		if (ret)
			goto unload_dmamap;
		memset(kva, 0, size);
		bus_dmamap_sync(vm->dmat, vm->scratch[0].base.map, 0, size,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		bus_dmamem_unmap(vm->dmat, kva, size);

		/* XXX Is this page guaranteed to work as a huge page? */
		vm_page = PHYS_TO_VM_PAGE(vm->scratch[0].base.seg.ds_addr);
		vm->scratch[0].base.page = container_of(vm_page, struct page,
		    p_vmp);
#else
		struct page *page;
		dma_addr_t addr;

		page = alloc_pages(gfp, order);
		if (unlikely(!page))
			goto skip;

		addr = dma_map_page_attrs(vm->dma,
					  page, 0, size,
					  PCI_DMA_BIDIRECTIONAL,
					  DMA_ATTR_SKIP_CPU_SYNC |
					  DMA_ATTR_NO_WARN);
		if (unlikely(dma_mapping_error(vm->dma, addr)))
			goto free_page;

		if (unlikely(!IS_ALIGNED(addr, size)))
			goto unmap_page;

		vm->scratch[0].base.page = page;
		vm->scratch[0].base.daddr = addr;
#endif
		vm->scratch_order = order;
		return 0;

#ifdef __NetBSD__
unload_dmamap:	bus_dmamap_unload(vm->dmat, vm->scratch[0].base.map);
destroy_dmamap:	bus_dmamap_destroy(vm->dmat, vm->scratch[0].base.map);
		vm->scratch[0].base.map = NULL;	/* paranoia */
free_dmamem:	bus_dmamem_free(vm->dmat, &vm->scratch[0].base.seg, 1);
#else
unmap_page:
		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
free_page:
		__free_pages(page, order);
#endif
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
		gfp &= ~__GFP_NOWARN;
	} while (1);
}

void cleanup_scratch_page(struct i915_address_space *vm)
{
	struct i915_page_dma *p = px_base(&vm->scratch[0]);
#ifdef __NetBSD__
	bus_dmamap_unload(vm->dmat, p->map);
	bus_dmamap_destroy(vm->dmat, p->map);
	vm->scratch[0].base.map = NULL;	/* paranoia */
	bus_dmamem_free(vm->dmat, &p->seg, 1);
#else
	unsigned int order = vm->scratch_order;

	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
		       PCI_DMA_BIDIRECTIONAL);
	__free_pages(p->page, order);
#endif
}

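/*
 * Release the scratch page-table levels (stopping at the first level that
 * was never allocated) and then the scratch page itself.  Cloned address
 * spaces share their parent's scratch and bail out early.
 */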
void free_scratch(struct i915_address_space *vm)
{
	int i;

	if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
		return;

	for (i = 1; i <= vm->top; i++) {
		if (!px_dma(&vm->scratch[i]))
			break;
		cleanup_page_dma(vm, px_base(&vm->scratch[i]));
	}

	cleanup_scratch_page(vm);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    INTEL_GEN(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GEN_RANGE(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages, and not disabling the
		 * GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		WARN_ON_ONCE(can_use_gtt_cache &&
			     intel_uncore_read(uncore,
					       HSW_GTT_CACHE_EN) == 0);
	}
}

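/*
 * Encode a GEN8+ PTE: the DMA address plus present/RW bits, with the PPAT
 * index chosen from the cache level (uncached, write-through for display,
 * or write-back).  PTE_READ_ONLY clears the RW bit.
 */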
u64 gen8_pte_encode(dma_addr_t addr,
		    enum i915_cache_level level,
		    u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
	/* TGL doesn't support LLC or AGE settings */
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void cnl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |	/* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */

	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

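/*
 * Program the private PAT with the routine matching the platform:
 * gen12+, gen10, CHV/gen9-LP, or the BDW default.
 */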
void setup_private_pat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	if (INTEL_GEN(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (INTEL_GEN(i915) >= 10)
		cnl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif