/*	$NetBSD: drm_mm.c,v 1.3.28.1 2018/09/06 06:56:09 pgoyette Exp $	*/

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_mm.c,v 1.3.28.1 2018/09/06 06:56:09 pgoyette Exp $");

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/printk.h>
#include <asm/bug.h>
#include <asm/div64.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * anything better would be fairly complex, and since gfx thrashing is a fairly
 * steep cliff anyway it is not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */
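
/*
 * Usage sketch (not part of this file's API docs; the mydrv pointer and its
 * vram_mm member are hypothetical): a driver typically embeds or allocates a
 * zeroed node and lets the allocator find space for it:
 *
 *	struct drm_mm_node *node;
 *	int ret;
 *
 *	node = kzalloc(sizeof(*node), GFP_KERNEL);	// must be cleared to 0
 *	if (node == NULL)
 *		return -ENOMEM;
 *	ret = drm_mm_insert_node_generic(&mydrv->vram_mm, node,
 *					 size, PAGE_SIZE, 0,
 *					 DRM_MM_SEARCH_DEFAULT,
 *					 DRM_MM_CREATE_DEFAULT);
 *	if (ret)	// -ENOSPC: no suitable hole
 *		kfree(node);
 */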

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

/*
 * Carve an allocation of @size out of the hole tracked by @hole_node and
 * initialize @node to describe it.
 */
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set up
 * before the range allocator can be initialized, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end = node->start + node->size;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
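
/*
 * Reservation sketch (hedged; fb_base/fb_size and the mydrv fields are
 * hypothetical): taking over a firmware framebuffer means pinning its exact
 * range before anything else lands there:
 *
 *	mydrv->fb_node.start = fb_base;
 *	mydrv->fb_node.size = fb_size;
 *	mydrv->fb_node.color = 0;
 *	ret = drm_mm_reserve_node(&mydrv->vram_mm, &mydrv->fb_node);
 *	// -ENOSPC here means the range is no longer inside a free hole
 */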

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

/*
 * Like drm_mm_insert_helper(), but additionally clamp the hole to the
 * [@start, @end) range before carving @node out of it.
 */
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	adj_start = max(adj_start, start);
	adj_end = min(adj_end, end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
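
/*
 * Range-restricted sketch (hedged; mydrv and the 256 MiB boundary are made
 * up): this is how a driver could keep an allocation inside the CPU-mappable
 * part of an aperture:
 *
 *	ret = drm_mm_insert_node_in_range_generic(&mydrv->gtt_mm, node,
 *						  size, 0, 0,
 *						  0, 256ULL << 20,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 */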

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Check whether the hole [@start, @end) can fit an allocation of @size once
 * @start has been rounded up to the next multiple of @alignment.
 */
static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		adj_start = max(adj_start, start);
		adj_end = min(adj_end, end);

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which needs
 * to be walked before a scan operation even begins this is linear in the
 * number of objects. It doesn't seem to hurt badly.
 */
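
/*
 * Eviction-loop sketch (hedged, driver-side pseudo-code; the mydrv object,
 * its LRU list and mydrv_evict() are hypothetical, the drm_mm calls are the
 * real ones from this file):
 *
 *	LIST_HEAD(scan_list);
 *	LIST_HEAD(evict_list);
 *	struct mydrv_obj *obj, *tmp;
 *
 *	drm_mm_init_scan(&mydrv->mm, size, alignment, color);
 *	list_for_each_entry(obj, &mydrv->lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);	// LIFO: head = newest
 *		if (drm_mm_scan_add_block(&obj->mm_node))
 *			break;	// roster now covers a big enough hole
 *	}
 *
 *	// Walking scan_list head-first is exactly the reverse order of
 *	// addition, as required; collect the blocks flagged for eviction.
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *		if (drm_mm_scan_remove_block(&obj->mm_node))
 *			list_move(&obj->scan_link, &evict_list);
 *		else
 *			list_del(&obj->scan_link);
 *	}
 *
 *	// Only now that the scan is finished may nodes be removed again.
 *	list_for_each_entry_safe(obj, tmp, &evict_list, scan_link)
 *		mydrv_evict(obj);	// calls drm_mm_remove_node()
 */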

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	/*
	 * Temporarily unlink the node so its space appears as a hole after
	 * prev_node; stash the old neighbour in ->prev and chain the scanned
	 * nodes through ->next so drm_mm_scan_remove_block() can undo this in
	 * reverse order.
	 */
	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
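
/*
 * Initialization sketch (hedged; the 256 MiB size is made up). The structure
 * must be zeroed before drm_mm_init(), and all nodes must have been removed
 * before drm_mm_takedown():
 *
 *	struct drm_mm mm;
 *
 *	memset(&mm, 0, sizeof(mm));
 *	drm_mm_init(&mm, 0, 256ULL << 20);
 *	...
 *	drm_mm_takedown(&mm);
 */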

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
				     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#"PRIx64"-%#"PRIx64": %"PRIu64": free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#"PRIx64"-%#"PRIx64": %"PRIu64": used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %"PRIu64", used %"PRIu64" free %"PRIu64"\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif