/*	$NetBSD: drm_mm.c,v 1.4 2018/08/27 04:58:19 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_mm.c,v 1.4 2018/08/27 04:58:19 riastradh Exp $");

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/printk.h>
#include <asm/bug.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to
 * extend for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so
 * if drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks.
 * This is useful for taking over initial mode setting configurations from
 * the firmware, where an object needs to be created which exactly matches
 * the firmware's scanout target. As long as the range is still free it can
 * be inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means
 * free space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx
 * thrashing is a fairly steep cliff anyway it is not a real concern.
 * Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can
 * be used to implement sophisticated placement restrictions. The i915 DRM
 * driver uses this to implement guard pages between incompatible caching
 * domains in the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if
 * the memory area has different restrictions, or just to reduce
 * fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided
 * as are some basic allocator dumpers for debugging.
 */
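
/*
 * Example (an illustrative sketch, not compiled): minimal driver-side use
 * of the allocator with an embedded node. struct my_buffer, my_vram_init(),
 * my_buffer_alloc() and my_buffer_free() are hypothetical names, not part
 * of this file; only the drm_mm_* calls are real.
 */
#if 0
struct my_buffer {
	struct drm_mm_node node;	/* embedded: no allocation by drm_mm */
	/* ... driver-private payload ... */
};

static void my_vram_init(struct drm_mm *mm)
{
	/* Manage a 256 MiB range starting at offset 0, once at load. */
	drm_mm_init(mm, 0, 256ULL << 20);
}

static int my_buffer_alloc(struct drm_mm *mm, struct my_buffer *buf, u64 size)
{
	/* buf->node must be zeroed; no alignment/color, default placement. */
	return drm_mm_insert_node_generic(mm, &buf->node, size, 0, 0,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}

static void my_buffer_free(struct my_buffer *buf)
{
	drm_mm_remove_node(&buf->node);	/* node is reusable immediately */
}
#endif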

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

/*
 * Carve @size bytes out of the hole following @hole_node and set up @node
 * to track the new allocation.
 */
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must
 * be set up before the range allocator can be set up, e.g. when taking
 * over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end = node->start + node->size;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
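
/*
 * Example (an illustrative sketch, not compiled): reserving the firmware's
 * scanout buffer at driver load. fw_base and fw_size stand in for values
 * read back from the hardware; the function name is hypothetical.
 */
#if 0
static int my_reserve_fw_scanout(struct drm_mm *mm, struct drm_mm_node *node,
				 u64 fw_base, u64 fw_size)
{
	/* The caller provides start, size and color before reserving. */
	node->start = fw_base;
	node->size = fw_size;
	node->color = 0;

	/* Fails with -ENOSPC if the range has already been handed out. */
	return drm_mm_reserve_node(mm, node);
}
#endif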

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
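
/*
 * Example (an illustrative sketch, not compiled): the two flag arguments
 * tune placement independently. DRM_MM_SEARCH_BEST picks the smallest hole
 * that fits and DRM_MM_CREATE_TOP allocates from the hole's top end. The
 * function name is hypothetical.
 */
#if 0
static int my_alloc_top_down(struct drm_mm *mm, struct drm_mm_node *node,
			     u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, PAGE_SIZE, 0,
					  DRM_MM_SEARCH_BEST,
					  DRM_MM_CREATE_TOP);
}
#endif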

/*
 * Like drm_mm_insert_helper(), but additionally clamp the allocation to
 * the [start, end) range.
 */
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	adj_start = max(adj_start, start);
	adj_end = min(adj_end, end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
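
/*
 * Example (an illustrative sketch, not compiled): constraining a node to
 * the low 4 GiB of the managed range, as needed for engines that only emit
 * 32-bit addresses. The function name is hypothetical.
 */
#if 0
static int my_alloc_low_4g(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, 0, 0,
						   0, 1ULL << 32,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}
#endif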

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not
 * need to be cleared again before it can be re-inserted into this or any
 * other drm_mm allocator. It is a bug to call this function on an
 * unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/* Return nonzero if a @size byte, @alignment aligned block fits in [start, end). */
static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}
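
/*
 * Worked example of the alignment arithmetic above: for a hole spanning
 * [0x1300, 0x3000) and alignment 0x1000, do_div() leaves rem = 0x300, so
 * the candidate start is rounded up by 0x1000 - 0x300 = 0xd00 to 0x2000,
 * and the hole still fits any size up to 0x1000 bytes.
 */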

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0ULL;	/* was ~0UL, which truncates on 32-bit */

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0ULL;	/* was ~0UL, which truncates on 32-bit */

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		adj_start = max(adj_start, start);
		adj_end = min(adj_end, end);

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of
 * remove and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
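
/*
 * Example (an illustrative sketch, not compiled): migrating an allocation
 * into a freshly allocated embedding structure while keeping its placement.
 * struct my_buffer and the function name are hypothetical.
 */
#if 0
static struct my_buffer *my_buffer_migrate(struct my_buffer *old)
{
	struct my_buffer *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return NULL;

	/* new->node inherits start/size/color and old's list position. */
	drm_mm_replace_node(&old->node, &new->node);
	kfree(old);		/* old->node is no longer allocated */
	return new;
}
#endif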

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not
 * very efficient to simply select objects from the tail of an LRU until
 * there's a suitable hole: especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we
 * evict lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the
 * reverse order to restore the allocator state. Note that while the
 * allocator is in scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which
 * needs to be walked before a scan operation even begins this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */
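
/*
 * Example (an illustrative sketch, not compiled): an eviction loop built on
 * the scan interface, roughly the shape of a driver's evict-something path.
 * struct my_obj, its link members and my_evict() are hypothetical.
 */
#if 0
struct my_obj {
	struct drm_mm_node node;
	struct list_head lru_link, scan_link, evict_link;
};

static int my_evict_something(struct drm_mm *mm, struct list_head *lru,
			      u64 size, unsigned alignment)
{
	struct my_obj *obj, *next;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* 1: feed LRU objects to the roster until a hole is found. */
	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/*
	 * 2: unwind in exactly the reverse order (scan_list is LIFO) and
	 * collect the blocks inside the found hole. No other allocator
	 * operation may happen until the roster is empty again.
	 */
	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
		list_del(&obj->scan_link);
		if (drm_mm_scan_remove_block(&obj->node))
			list_add(&obj->evict_link, &evict_list);
	}

	/* 3: only now evict, which ends in drm_mm_remove_node(). */
	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
		list_del(&obj->evict_link);
		my_evict(obj);	/* hypothetical driver unbind + free */
	}

	return found ? 0 : -ENOSPC;
}
#endif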

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags,
 * since they only change the place a node is allocated from within a
 * suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags,
 * since they only change the place a node is allocated from within a
 * suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
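
/*
 * Worked example of the head_node trick above: with start = 0x1000 and
 * size = 0x4000, head_node.start becomes 0x5000 and head_node.size wraps
 * to (u64)-0x4000, so the head node "occupies" everything outside
 * [0x1000, 0x5000) and the single initial hole following it is exactly
 * the managed range. Real nodes then never need boundary special cases.
 */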

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif