/*	$NetBSD: i915_vma.c,v 1.3 2021/12/19 01:24:26 riastradh Exp $	*/

/*
 * Copyright 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_vma.c,v 1.3 2021/12/19 01:24:26 riastradh Exp $");

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

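/*
 * All i915_vma objects come from a dedicated slab cache, registered with
 * the i915_globals machinery at the bottom of this file so that it can be
 * shrunk and destroyed together with the rest of the driver-wide caches.
 */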
static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

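/*
 * While the vma is active on the GPU we hold a reference on it so that
 * neither it nor its object can disappear under us; __i915_vma_retire()
 * drops that reference once the last request using the vma has completed.
 */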
static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

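/*
 * Allocate and initialise a fresh vma for (obj, vm, view). If a concurrent
 * caller beat us to it, the existing vma found in obj->vma.tree is returned
 * instead and our allocation is discarded.
 */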
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

#ifndef __NetBSD__		/* XXX fs reclaim */
	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}
#endif

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

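/*
 * Search the object's rbtree for a vma matching (vm, view). The caller
 * must hold obj->vma.lock, as the tree may be modified concurrently by
 * vma_create() and i915_vma_release().
 */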
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

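/*
 * State for an asynchronous bind: a dma_fence_work whose worker callback
 * performs the actual PTE setup, keeping the object's backing pages pinned
 * until the work is released.
 */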
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;
	int err;

	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
	if (err)
		atomic_or(I915_VMA_ERROR, &vma->flags);

	return err;
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
		__i915_gem_object_unpin_pages(vw->pinned);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags | I915_VMA_ALLOC;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not the content or the object's backing
		 * store lifetime.
		 */
		GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
		i915_active_set_exclusive(&vma->active, &work->base.dma);
		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = vma->obj;
		}
	} else {
		GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
		if (ret)
			return ret;
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

#ifdef __NetBSD__
# define __iomem __i915_vma_iomem
#endif

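/*
 * Map the GGTT range backing a bound vma write-combined through the mappable
 * aperture and pin the vma (and its fence) so that the mapping stays valid
 * for CPU access. Callers pair this with i915_vma_unpin_iomap().
 */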
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

#ifdef __NetBSD__
# undef __iomem
#endif

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

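/*
 * Recompute, after the vma has been (re)placed in the GGTT, whether it is
 * both mappable through the aperture and suitably sized and aligned for a
 * fence register, and update I915_VMA_CAN_FENCE_BIT accordingly.
 */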
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <=
		i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) <
		   atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/*
	 * If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		atomic_inc(&obj->bind_count);
		assert_bind_count(obj);
	}
	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		assert_bind_count(obj);
		atomic_dec(&obj->bind_count);
	}
}

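/*
 * "Quick and dirty" pin: atomically bump the pin count without taking
 * vm->mutex, which can only succeed if the vma is already bound with all
 * the flags the caller asked for. If the vma is bound but currently
 * unpinned, recheck under the lock so that we do not race with a
 * concurrent i915_vma_unbind().
 */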
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

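/*
 * vma->pages_count is a split refcount: the bits below I915_VMA_PAGES_BIAS
 * count vma_get_pages() references, while the bits above count live
 * bindings (see i915_vma_pin() and vma_unbind_pages()). The first
 * reference allocates vma->pages under pages_mutex; the last put frees
 * them again.
 */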
static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

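/*
 * Pin the vma into its address space, binding it there first if it is not
 * already bound with the requested flags. A minimal usage sketch (error
 * handling elided):
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *	... use vma->node.start ...
 *	i915_vma_unpin(vma);
 *
 * On success the vma is guaranteed to stay bound at its current address
 * until the matching i915_vma_unpin().
 */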
int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(flags & PIN_UPDATE);
	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_pages;
		}
	}

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	/* No more allocations allowed once we hold vm->mutex */
	err = mutex_lock_interruptible(&vma->vm->mutex);
	if (err)
		goto err_fence;

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit(&work->base);
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
	vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

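/*
 * Pin into the global GTT, retrying after flushing idle contexts and
 * evicting the address space whenever the pin fails with -ENOSPC; the
 * loop only gives up when a different error is returned.
 */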
int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	do {
		err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
		if (err != -ENOSPC)
			return err;

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	spin_lock_irqsave(&gt->closed_lock, flags);
	list_add(&vma->closed_link, &gt->closed_vma);
	spin_unlock_irqrestore(&gt->closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}

void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (i915_vm_tryopen(vm)) {
			list_del_init(&vma->closed_link);
		} else {
			i915_gem_object_put(obj);
			obj = NULL;
		}

		spin_unlock_irq(&gt->closed_lock);

		if (obj) {
			__i915_vma_put(vma);
			i915_gem_object_put(obj);
		}

		i915_vm_close(vm);

		/* Restart after dropping lock */
		spin_lock_irq(&gt->closed_lock);
		next = list_first_entry(&gt->closed_vma,
					typeof(*next), closed_link);
	}
	spin_unlock_irq(&gt->closed_lock);
}

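/* Tear down the aperture mapping created by i915_vma_pin_iomap(), if any. */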
static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	/* Wait for the vma to be bound before we start! */
	err = i915_request_await_active(rq, &vma->active);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		dma_resv_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		err = dma_resv_reserve_shared(vma->resv, 1);
		if (unlikely(err))
			return err;

		dma_resv_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 *
	 * XXX Actually waiting under the vm->mutex is a hindrance and
	 * should be pipelined wherever possible. In cases where that is
	 * unavoidable, we should lift the wait to before the mutex.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_revoke_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible(&vm->mutex);
	if (err)
		return err;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);

	return err;
}

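/*
 * Thin wrappers that adjust how visible the vma's backing object is to the
 * shrinker; see the i915_gem_object_make_*() helpers for the details.
 */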
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

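/*
 * i915_globals hooks: allow the driver core to shrink the vma slab under
 * memory pressure and to destroy it when the driver's globals are torn down.
 */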
static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}