/*	$NetBSD: i915_gem_evict.c,v 1.1.1.2.28.1 2018/09/06 06:56:17 pgoyette Exp $	*/

/*
 * Copyright 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_evict.c,v 1.1.1.2.28.1 2018/09/06 06:56:17 pgoyette Exp $");

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

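/*
 * mark_free - add an unpinned vma to the current eviction scan
 *
 * Skips vmas that are pinned or already queued on an exec list, puts the
 * vma on the caller's unwind list, and hands its drm_mm node to the
 * active drm_mm scan.  Returns true once drm_mm reports that the nodes
 * accumulated so far would form a hole satisfying the scan request.
 */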
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm_device
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it
 * ignores only pinned vmas, not objects whose backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 unsigned long start, unsigned long end,
			 unsigned flags)
{
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
	int pass = 0;

	trace_i915_gem_evict(dev, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

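	/* drm_mm scanning only simulates eviction: each mark_free() call
	 * adds one more node to the scan, and drm_mm reports when the
	 * accumulated neighbouring nodes would coalesce into a large
	 * enough hole.  Nothing is actually unbound until the "found"
	 * stage below commits the result.
	 */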
	INIT_LIST_HEAD(&unwind_list);
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

search_again:
	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

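	/* PIN_NONBLOCK callers cannot wait for the GPU, so never consider
	 * vmas that are still active.
	 */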
	if (flags & PIN_NONBLOCK)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
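	/* Every block added to the scan must be removed again before the
	 * drm_mm may be used for anything else.  unwind_list was built
	 * with list_add(), so walking it from the head undoes the
	 * additions in reverse order.
	 */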
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* Can we unpin some objects, such as idle hw contexts or
	 * pending flips?
	 */
	if (flags & PIN_NONBLOCK)
		return -ENOSPC;

	/* Only idle the GPU and repeat the search once */
	if (pass++ == 0) {
		ret = i915_gpu_idle(dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev);
		goto search_again;
	}

	/* If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while scanning,
	 * therefore store the objects to be evicted on a temporary
	 * list.
	 */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
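			/* Hold a reference so the object (and with it
			 * the vma) cannot be freed before we unbind it
			 * below.
			 */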
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}
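
/*
 * For reference, a sketch of the typical caller pattern (cf.
 * i915_gem_object_bind_to_vm() in i915_gem.c): attempt the drm_mm
 * allocation first, and only on failure evict and retry.
 *
 *	search_free:
 *		ret = <allocate vma->node from vm->mm>;
 *		if (ret == -ENOSPC) {
 *			ret = i915_gem_evict_something(dev, vm, size,
 *						       alignment, cache_level,
 *						       start, end, flags);
 *			if (ret == 0)
 *				goto search_free;
 *		}
 */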

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas
 * should be evicted, @do_idle must be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);

		WARN_ON(!list_empty(&vm->active_list));
	}

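	/* The _safe iterator is required here: i915_vma_unbind() unlinks
	 * (and may free) the vma, so the next pointer must be fetched
	 * before each unbind.
	 */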
	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
		if (vma->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}