drm_mm.c revision 1.2.4.2
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions; using an RB-tree instead could
 * easily improve it, at least under heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/printk.h>
#include <asm/bug.h>

#define MM_UNUSED_TARGET 4

/*
 * Allocate a node, falling back to the cache of pre-allocated nodes if the
 * GFP_ATOMIC/GFP_KERNEL allocation fails.
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
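
/*
 * Usage sketch (illustrative only; caller_lock is hypothetical): refill the
 * node cache before entering atomic context, then allocate with atomic = 1
 * so drm_mm_kmalloc() can fall back to the cache instead of sleeping.
 *
 *	if (drm_mm_pre_get(&mm))
 *		return -ENOMEM;
 *	spin_lock(&caller_lock);
 *	hole = drm_mm_search_free_generic(&mm, size, alignment, 0, 0);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, size, alignment, 0, 1);
 *	spin_unlock(&caller_lock);
 */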

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	/* Round the start up to the requested alignment. */
	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	/*
	 * The new node starts exactly where the hole began, so hole_node no
	 * longer has a hole after it.
	 */
	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);
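
/*
 * Sketch of the embedded-node style this function supports (struct and field
 * names are hypothetical):
 *
 *	struct my_buffer {
 *		struct drm_mm_node vma;		// no separate allocation needed
 *	};
 *
 *	memset(&buf->vma, 0, sizeof(buf->vma));	// node must be cleared
 *	ret = drm_mm_insert_node(&mm, &buf->vma, buf->size, PAGE_SIZE);
 *	if (ret)				// -ENOSPC: no hole fits
 *		return ret;
 */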

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
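
/*
 * Range-restricted sketch: confine an allocation to the first 256 MiB of the
 * managed range (the boundary is an invented example value):
 *
 *	ret = drm_mm_insert_node_in_range(&mm, &buf->vma, buf->size,
 *					  PAGE_SIZE, 0, 256UL << 20);
 */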

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
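
/*
 * Pairing sketch for the non-embedded API (illustrative): nodes handed out
 * by the drm_mm_get_block family are returned with drm_mm_put_block(), which
 * recycles the struct through the unused-node cache when there is room.
 *
 *	hole = drm_mm_search_free_generic(&mm, size, alignment, 0, 0);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, size, alignment, 0, 0);
 *	...
 *	drm_mm_put_block(node);
 */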

/*
 * Return 1 if a block of @size bytes with the given alignment fits in the
 * [start, end) interval, 0 otherwise.
 */
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}
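
/*
 * Worked example of the round-up above (numbers invented): for start =
 * 0x1003, end = 0x3000, size = 0x1000 and alignment = 0x1000, tmp = 3, so
 * start is bumped to 0x2000 and the hole is accepted because
 * 0x3000 >= 0x2000 + 0x1000. With end = 0x2fff it would be rejected.
 */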

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);
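
/*
 * The best_match flag trades speed for fragmentation, roughly like this
 * (sketch; variables are illustrative): first-fit returns as soon as any
 * hole is large enough, while best-fit walks the whole hole_stack and keeps
 * the smallest hole that still fits.
 *
 *	hole = drm_mm_search_free_generic(&mm, size, alignment, 0, true);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, size, alignment, 0, 0);
 */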

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
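
/*
 * Sketch: handing an allocation over from one embedded node to another, e.g.
 * when moving bookkeeping between objects (names hypothetical):
 *
 *	memset(&new_obj->vma, 0, sizeof(new_obj->vma));
 *	drm_mm_replace_node(&old_obj->vma, &new_obj->vma);
 *	// old_obj->vma is now unallocated; the managed range is untouched
 */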

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
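
/*
 * Putting the scan API together: an illustrative eviction loop over a
 * driver-private LRU (the obj/evict lists and fields are hypothetical).
 * Because list_add() prepends, walking @evict forward visits blocks in the
 * exact reverse order of addition, as required above.
 *
 *	drm_mm_init_scan(&mm, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict);
 *		if (drm_mm_scan_add_block(&obj->vma))
 *			break;			// suitable hole found
 *	}
 *
 *	list_for_each_entry_safe(obj, tmp, &evict, evict_link) {
 *		if (!drm_mm_scan_remove_block(&obj->vma))
 *			list_del(&obj->evict_link);	// not needed for the hole
 *	}
 *	// everything still on @evict must now really be unbound and freed
 */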

/*
 * Return 1 if the allocator holds no allocated nodes, 0 otherwise. With the
 * embedded head_node scheme an empty manager has an empty node list; the
 * previous head->next->next check appears to date from the old list layout
 * and also reported "clean" when exactly one node was allocated.
 */
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return list_empty(head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/*
	 * Clever trick to avoid a special case in the free hole tracking:
	 * head_node is a dummy that starts at the end of the managed range
	 * with a negative size, so the hole that follows it covers exactly
	 * the whole [start, start + size) range.
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
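
/*
 * Initialization sketch (values are illustrative): manage a 16 MiB range
 * starting at offset 0, and tear it down once every node has been removed.
 *
 *	struct drm_mm mm;
 *
 *	drm_mm_init(&mm, 0, 16 << 20);
 *	...
 *	drm_mm_takedown(&mm);	// BUGs if any node is still allocated
 */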

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	BUG_ON(!list_empty(&mm->head_node.node_list));

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	/*
	 * XXX The locking above can't be right -- either it is
	 * unnecessary or it is insufficient.
	 */
	spin_lock_destroy(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif