/*	$NetBSD: drm_mm.c,v 1.7 2021/12/18 23:44:57 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_mm.c,v 1.7 2021/12/18 23:44:57 riastradh Exp $");

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which makes it easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps avoid
 * looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means
 * free space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx
 * thrashing is a fairly steep cliff anyway, this is not a real concern.
 * Removing a node, in turn, is O(1).
 *
 * drm_mm supports a few features: alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can
 * be used to implement sophisticated placement restrictions. The i915 DRM
 * driver uses this to implement guard pages between incompatible caching
 * domains in the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided,
 * as are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */
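
/*
 * A minimal usage sketch (illustrative only: the range and sizes below are
 * assumptions, and locking and error handling are elided):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int ret;
 *
 *	drm_mm_init(&mm, 0, 0x100000);		// manage [0, 0x100000)
 *	ret = drm_mm_insert_node_in_range(&mm, &node, 0x1000, 0, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_BEST);
 *	if (ret == 0)				// node now tracks the range
 *		drm_mm_remove_node(&node);	// give it back
 *	drm_mm_takedown(&mm);			// all nodes must be gone
 */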

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator
 * with preallocated objects which must be set up before the range allocator
 * can be set up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
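
/*
 * Reservation sketch: taking over a firmware-initialized scanout range.
 * fw_start and fw_size are assumed values for illustration; only start,
 * size and color may be set, all other fields must remain zero:
 *
 *	struct drm_mm_node fw_node = {};
 *	int err;
 *
 *	fw_node.start = fw_start;
 *	fw_node.size = fw_size;
 *	err = drm_mm_reserve_node(&mm, &fw_node);
 *	if (err)	// -ENOSPC: no free hole covers the requested range
 *		return err;
 */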

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
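
/*
 * Example: a 64 KiB allocation, 4 KiB aligned, placed top-down within the
 * first 256 MiB (all sizes here are assumptions for illustration; color 0
 * means no color_adjust constraints apply):
 *
 *	ret = drm_mm_insert_node_in_range(&mm, &node,
 *					  0x10000,	// size: 64 KiB
 *					  0x1000,	// alignment: 4 KiB
 *					  0,		// color
 *					  0, 0x10000000,// allowed range
 *					  DRM_MM_INSERT_HIGH);
 */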

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
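
/*
 * Sketch: handing an allocation over to a freshly allocated wrapper object;
 * struct my_obj and its embedded node are assumptions for illustration:
 *
 *	struct my_obj {
 *		struct drm_mm_node node;
 *	};
 *
 *	static int my_obj_swap(struct my_obj *old_obj, struct my_obj *new_obj)
 *	{
 *		if (!drm_mm_node_allocated(&old_obj->node))
 *			return -EINVAL;
 *		drm_mm_replace_node(&old_obj->node, &new_obj->node);
 *		return 0;	// old_obj->node is now unallocated
 *	}
 */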

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not most
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is
 * used in scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color
 * adjustment (drm_mm_scan_color_evict()). Adding and removing an object is
 * O(1), and since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the hole stack which needs to be walked
 * before a scan operation even begins, this is linear in the number of
 * objects. It doesn't seem to hurt too badly.
 */
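
/*
 * A sketch of the full protocol (struct my_obj, its LRU list and the
 * size/align/color parameters are driver-specific assumptions; locking and
 * the actual eviction/freeing of objects are elided):
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *tmp;
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_scan_init_with_range(&scan, &mm, size, align, color,
 *				    0, U64_MAX, DRM_MM_INSERT_BEST);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		// list_add() prepends, so walking evict_list forward later
 *		// visits the nodes in exactly the reverse order of addition.
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// Restore the allocator state; keep only nodes that must be evicted.
 *	list_for_each_entry_safe(obj, tmp, &evict_list, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);
 *	}
 *
 *	// If found, evict everything left on evict_list (plus any node
 *	// returned by drm_mm_scan_color_evict()), then insert the new node.
 */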

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;
	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list
 * as they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018"PRIx64"-%#018"PRIx64": %"PRIu64": free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);

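/*
 * Dump sketch: any struct drm_printer sink works; drm_info_printer() here
 * is one assumed choice (dev being a struct device *):
 *
 *	struct drm_printer p = drm_info_printer(dev);
 *
 *	drm_mm_print(&mm, &p);
 */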