/*	$NetBSD: drm_mm.c,v 1.9 2021/12/19 11:00:36 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_mm.c,v 1.9 2021/12/19 11:00:36 riastradh Exp $");

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which means it's easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway, this is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */
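
/*
 * As a hedged illustration only (the my_gpu/my_buffer names below are
 * invented and do not exist in this file): a driver embedding the allocator
 * might use it roughly like this.  The node memory is owned by the driver,
 * must be zeroed before insertion, and all calls must be serialized by a
 * driver-provided lock.
 *
 *	struct my_gpu {
 *		struct drm_mm vram_mm;
 *	};
 *	struct my_buffer {
 *		struct drm_mm_node node;	// embedded, no extra allocation
 *	};
 *
 *	// Manage a 256 MiB aperture starting at offset 0.
 *	drm_mm_init(&gpu->vram_mm, 0, 256 << 20);
 *
 *	// Best-fit a 64 KiB, 4 KiB-aligned block anywhere in the range.
 *	err = drm_mm_insert_node_in_range(&gpu->vram_mm, &buf->node,
 *					  64 << 10, 4 << 10, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_BEST);
 *	if (!err)
 *		use(buf->node.start);	// the allocated GPU offset
 *
 *	drm_mm_remove_node(&buf->node);
 *	drm_mm_takedown(&gpu->vram_mm);
 */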

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm_const, u64 start, u64 last)
{
	struct drm_mm *mm = __UNCONST(mm_const);
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#ifdef __NetBSD__

static int
compare_hole_addrs(void *cookie, const void *va, const void *vb)
{
	const struct drm_mm_node *a = va, *b = vb;
	const u64 aa = __drm_mm_hole_node_start(a);
	const u64 ba = __drm_mm_hole_node_start(b);

	if (aa < ba)
		return -1;
	if (aa > ba)
		return +1;
	return 0;
}

static int
compare_hole_addr_key(void *cookie, const void *vn, const void *vk)
{
	const struct drm_mm_node *n = vn;
	const u64 a = __drm_mm_hole_node_start(n);
	const u64 *k = vk;

	if (a < *k)
		return -1;
	if (a > *k)
		return +1;
	return 0;
}

static const rb_tree_ops_t holes_addr_rb_ops = {
	.rbto_compare_nodes = compare_hole_addrs,
	.rbto_compare_key = compare_hole_addr_key,
	.rbto_node_offset = offsetof(struct drm_mm_node, rb_hole_addr),
};

#else

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#endif

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static int
compare_hole_sizes(void *cookie, const void *va, const void *vb)
{
	const struct drm_mm_node *a = va, *b = vb;

	if (a->hole_size < b->hole_size)
		return -1;
	if (a->hole_size > b->hole_size)
		return +1;
	return 0;
}

static int
compare_hole_size_key(void *cookie, const void *vn, const void *vk)
{
	const struct drm_mm_node *n = vn;
	const u64 *k = vk;

	if (n->hole_size < *k)
		return -1;
	if (n->hole_size > *k)
		return +1;
	return 0;
}

static const rb_tree_ops_t holes_size_rb_ops = {
	.rbto_compare_nodes = compare_hole_sizes,
	.rbto_compare_key = compare_hole_size_key,
	.rbto_node_offset = offsetof(struct drm_mm_node, rb_hole_size),
};

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
#ifdef __NetBSD__
	struct drm_mm_node *collision __diagused;
	collision = rb_tree_insert_node(&root->rb_root.rbr_tree, node);
	KASSERT(collision == node);
#else
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
#endif
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
#ifdef __NetBSD__
	struct drm_mm_node *collision __diagused;
	collision = rb_tree_insert_node(&mm->holes_addr, node);
	KASSERT(collision == node);
#else
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
#endif

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
#ifdef __NetBSD__
		return RB_TREE_NEXT(&mm->holes_size.rb_root.rbr_tree, node);
#else
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
#endif

	case DRM_MM_INSERT_LOW:
#ifdef __NetBSD__
		return RB_TREE_NEXT(&mm->holes_addr.rbr_tree, node);
#else
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
#endif

	case DRM_MM_INSERT_HIGH:
#ifdef __NetBSD__
		return RB_TREE_PREV(&mm->holes_addr.rbr_tree, node);
#else
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
#endif

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set up before the range allocator can be
 * set up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
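
/*
 * A minimal sketch of the reservation path described above, assuming a
 * hypothetical firmware-programmed framebuffer at fb_base/fb_size (names
 * invented for the example); the node must be zeroed before start and size
 * are filled in:
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *
 *	if (!node)
 *		return -ENOMEM;
 *	node->start = fb_base;		// must lie inside a free hole
 *	node->size = fb_size;
 *	err = drm_mm_reserve_node(&gpu->vram_mm, node);
 *	if (err)			// -ENOSPC if the range is taken
 *		kfree(node);
 */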

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

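		/*
		 * Adjust the candidate start for alignment: power-of-two
		 * alignments use the cheap remainder_mask set up above,
		 * anything else falls back to a full 64-bit division.
		 * Bottom-up modes round the start up, DRM_MM_INSERT_HIGH
		 * rounds it down, and in either case the shifted window is
		 * re-checked against the color- and range-adjusted bounds.
		 */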
		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
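
/*
 * For illustration (hedged, with a hypothetical mm/node), the @mode argument
 * picks the placement policy within the same range.  Placing a 1 MiB block
 * somewhere below 4 GiB, from the bottom up:
 *
 *	err = drm_mm_insert_node_in_range(mm, node, 1 << 20, 0, 0,
 *					  0, 1ULL << 32, DRM_MM_INSERT_LOW);
 *
 * or from the top down, which can help keep the low range defragmented:
 *
 *	err = drm_mm_insert_node_in_range(mm, node, 1 << 20, 0, 0,
 *					  0, 1ULL << 32, DRM_MM_INSERT_HIGH);
 */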

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
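
/*
 * A hedged sketch: drm_mm_replace_node() lets a driver hand an allocation
 * from a temporary node to the one embedded in its final object (my_obj is
 * invented for the example):
 *
 *	struct drm_mm_node tmp = {};
 *
 *	err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_BEST);
 *	if (err)
 *		return err;
 *	drm_mm_replace_node(&tmp, &obj->node);	// same start, size and color
 *	// tmp is now unallocated and may go out of scope
 */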

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient strategy to simply select all objects from the tail of an LRU
 * until there's a suitable hole: especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we evict
 * lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack, which needs to be walked before
 * a scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of the full protocol follows below.
 */
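
/*
 * A hedged sketch of the protocol just described, with invented driver state
 * (an LRU list of my_obj structures, each embedding a drm_mm_node, and an
 * evict_object() helper); none of these names exist in this file:
 *
 *	struct drm_mm_scan scan;
 *	struct drm_mm_node *node;
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(evict);
 *
 *	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_EVICT);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		// list_add() records the roster in reverse order for us
 *		list_add(&obj->evict_link, &evict);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;	// found a hole
 *	}
 *
 *	// Walk back in reverse to restore the allocator state, keeping
 *	// only the nodes that must go (always none if no hole was found).
 *	list_for_each_entry_safe(obj, next, &evict, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);	// keep this one
 *	}
 *
 *	// Allocator state is restored; now it is safe to free the rest.
 *	list_for_each_entry_safe(obj, next, &evict, evict_link)
 *		evict_object(obj);
 *
 *	// On success only: clear out nodes still overlapping the hole
 *	// because of color adjustment.
 *	while ((node = drm_mm_scan_color_evict(&scan)))
 *		evict_object(container_of(node, struct my_obj, node));
 */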

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with the
 * DRM_MM_INSERT_EVICT mode will then return the just freed block (because
 * it's at the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
#ifdef __NetBSD__
	drm_mm_interval_tree_init(&mm->interval_tree);
	rb_tree_init(&mm->holes_size.rb_root.rbr_tree, &holes_size_rb_ops);
	rb_tree_init(&mm->holes_addr.rbr_tree, &holes_addr_rb_ops);
#else
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;
#endif

	/*
	 * Clever trick to avoid a special case in the free hole tracking:
	 * the head node starts at start + size with size = -size, so it
	 * wraps around the end of the range and the hole that follows it
	 * covers the entire managed range [start, start + size).
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018"PRIx64"-%#018"PRIx64": %"PRIu64": free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018"PRIx64"-%#018"PRIx64": %"PRIu64": used\n",
			   entry->start, entry->start + entry->size,
			   entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %"PRIu64", used %"PRIu64" free %"PRIu64"\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
