drm_mm.c revision 1.3.4.2       1  1.3.4.2  tls /**************************************************************************
      2  1.3.4.2  tls  *
      3  1.3.4.2  tls  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
      4  1.3.4.2  tls  * All Rights Reserved.
      5  1.3.4.2  tls  *
      6  1.3.4.2  tls  * Permission is hereby granted, free of charge, to any person obtaining a
      7  1.3.4.2  tls  * copy of this software and associated documentation files (the
      8  1.3.4.2  tls  * "Software"), to deal in the Software without restriction, including
      9  1.3.4.2  tls  * without limitation the rights to use, copy, modify, merge, publish,
     10  1.3.4.2  tls  * distribute, sub license, and/or sell copies of the Software, and to
     11  1.3.4.2  tls  * permit persons to whom the Software is furnished to do so, subject to
     12  1.3.4.2  tls  * the following conditions:
     13  1.3.4.2  tls  *
     14  1.3.4.2  tls  * The above copyright notice and this permission notice (including the
     15  1.3.4.2  tls  * next paragraph) shall be included in all copies or substantial portions
     16  1.3.4.2  tls  * of the Software.
     17  1.3.4.2  tls  *
     18  1.3.4.2  tls  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19  1.3.4.2  tls  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     20  1.3.4.2  tls  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     21  1.3.4.2  tls  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     22  1.3.4.2  tls  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     23  1.3.4.2  tls  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     24  1.3.4.2  tls  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     25  1.3.4.2  tls  *
     26  1.3.4.2  tls  *
     27  1.3.4.2  tls  **************************************************************************/
     28  1.3.4.2  tls 
     29  1.3.4.2  tls /*
     30  1.3.4.2  tls  * Generic simple memory manager implementation. Intended to be used as a base
     31  1.3.4.2  tls  * class implementation for more advanced memory managers.
     32  1.3.4.2  tls  *
     33  1.3.4.2  tls  * Note that the algorithm used is quite simple and there might be substantial
     34  1.3.4.2  tls  * performance gains if a smarter free list is implemented. Currently it is just an
     35  1.3.4.2  tls  * unordered stack of free regions. This could easily be improved if an RB-tree
     36  1.3.4.2  tls  * is used instead. At least if we expect heavy fragmentation.
     37  1.3.4.2  tls  *
     38  1.3.4.2  tls  * Aligned allocations can also see improvement.
     39  1.3.4.2  tls  *
     40  1.3.4.2  tls  * Authors:
      41  1.3.4.2  tls  * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
     42  1.3.4.2  tls  */
     43  1.3.4.2  tls 
     44  1.3.4.2  tls #include <drm/drmP.h>
     45  1.3.4.2  tls #include <drm/drm_mm.h>
     46  1.3.4.2  tls #include <linux/slab.h>
     47  1.3.4.2  tls #include <linux/seq_file.h>
     48  1.3.4.2  tls #include <linux/export.h>
     49  1.3.4.2  tls #include <linux/printk.h>
     50  1.3.4.2  tls #include <asm/bug.h>
     51  1.3.4.2  tls 
     52  1.3.4.2  tls /**
     53  1.3.4.2  tls  * DOC: Overview
     54  1.3.4.2  tls  *
     55  1.3.4.2  tls  * drm_mm provides a simple range allocator. The drivers are free to use the
     56  1.3.4.2  tls  * resource allocator from the linux core if it suits them, the upside of drm_mm
     57  1.3.4.2  tls  * is that it's in the DRM core. Which means that it's easier to extend for
     58  1.3.4.2  tls  * some of the crazier special purpose needs of gpus.
     59  1.3.4.2  tls  *
     60  1.3.4.2  tls  * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
     61  1.3.4.2  tls  * Drivers are free to embed either of them into their own suitable
     62  1.3.4.2  tls  * datastructures. drm_mm itself will not do any allocations of its own, so if
     63  1.3.4.2  tls  * drivers choose not to embed nodes they need to still allocate them
     64  1.3.4.2  tls  * themselves.
     65  1.3.4.2  tls  *
     66  1.3.4.2  tls  * The range allocator also supports reservation of preallocated blocks. This is
     67  1.3.4.2  tls  * useful for taking over initial mode setting configurations from the firmware,
     68  1.3.4.2  tls  * where an object needs to be created which exactly matches the firmware's
     69  1.3.4.2  tls  * scanout target. As long as the range is still free it can be inserted anytime
     70  1.3.4.2  tls  * after the allocator is initialized, which helps with avoiding looped
      71  1.3.4.2  tls  * dependencies in the driver load sequence.
     72  1.3.4.2  tls  *
     73  1.3.4.2  tls  * drm_mm maintains a stack of most recently freed holes, which of all
     74  1.3.4.2  tls  * simplistic datastructures seems to be a fairly decent approach to clustering
     75  1.3.4.2  tls  * allocations and avoiding too much fragmentation. This means free space
     76  1.3.4.2  tls  * searches are O(num_holes). Given that all the fancy features drm_mm supports
     77  1.3.4.2  tls  * something better would be fairly complex and since gfx thrashing is a fairly
     78  1.3.4.2  tls  * steep cliff not a real concern. Removing a node again is O(1).
     79  1.3.4.2  tls  *
     80  1.3.4.2  tls  * drm_mm supports a few features: Alignment and range restrictions can be
     81  1.3.4.2  tls  * supplied. Further more every &drm_mm_node has a color value (which is just an
      82  1.3.4.2  tls  * opaque unsigned long) which in conjunction with a driver callback can be used
     83  1.3.4.2  tls  * to implement sophisticated placement restrictions. The i915 DRM driver uses
     84  1.3.4.2  tls  * this to implement guard pages between incompatible caching domains in the
     85  1.3.4.2  tls  * graphics TT.
     86  1.3.4.2  tls  *
     87  1.3.4.2  tls  * Two behaviors are supported for searching and allocating: bottom-up and top-down.
     88  1.3.4.2  tls  * The default is bottom-up. Top-down allocation can be used if the memory area
     89  1.3.4.2  tls  * has different restrictions, or just to reduce fragmentation.
     90  1.3.4.2  tls  *
     91  1.3.4.2  tls  * Finally iteration helpers to walk all nodes and all holes are provided as are
     92  1.3.4.2  tls  * some basic allocator dumpers for debugging.
     93  1.3.4.2  tls  */
     94  1.3.4.2  tls 
     95  1.3.4.2  tls static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
     96  1.3.4.2  tls 						unsigned long size,
     97  1.3.4.2  tls 						unsigned alignment,
     98  1.3.4.2  tls 						unsigned long color,
     99  1.3.4.2  tls 						enum drm_mm_search_flags flags);
    100  1.3.4.2  tls static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
    101  1.3.4.2  tls 						unsigned long size,
    102  1.3.4.2  tls 						unsigned alignment,
    103  1.3.4.2  tls 						unsigned long color,
    104  1.3.4.2  tls 						unsigned long start,
    105  1.3.4.2  tls 						unsigned long end,
    106  1.3.4.2  tls 						enum drm_mm_search_flags flags);
    107  1.3.4.2  tls 
/*
 * Core insertion helper: carve @node out of the hole that follows @hole_node.
 *
 * The caller has already found (via drm_mm_search_free_generic()) a hole big
 * enough for @size bytes at @alignment; here we place the allocation,
 * honouring the driver's color_adjust callback and the DRM_MM_CREATE_TOP
 * flag, and fix up the hole bookkeeping on both @hole_node and @node.
 */
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	/* @node must not already belong to an allocator. */
	BUG_ON(node->allocated);

	/* Let the driver shrink the usable range (e.g. for guard pages). */
	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	/* Top-down: start as close to the end of the hole as possible. */
	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp) {
			if (flags & DRM_MM_CREATE_TOP)
				/* Round down so the block still ends at or
				 * before adj_end. */
				adj_start -= tmp;
			else
				adj_start += alignment - tmp;
		}
	}

	/* The search already guaranteed the aligned block fits the hole. */
	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	/* Allocation begins flush with the hole: @hole_node no longer
	 * fronts a hole, so retire it from the hole stack. */
	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	/* If space remains between our end and the next node, @node now
	 * fronts a fresh hole; publish it on the LRU hole stack. */
	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}
    163  1.3.4.2  tls 
    164  1.3.4.2  tls /**
     165  1.3.4.2  tls  * drm_mm_reserve_node - insert a pre-initialized node
    166  1.3.4.2  tls  * @mm: drm_mm allocator to insert @node into
    167  1.3.4.2  tls  * @node: drm_mm_node to insert
    168  1.3.4.2  tls  *
    169  1.3.4.2  tls  * This functions inserts an already set-up drm_mm_node into the allocator,
    170  1.3.4.2  tls  * meaning that start, size and color must be set by the caller. This is useful
    171  1.3.4.2  tls  * to initialize the allocator with preallocated objects which must be set-up
    172  1.3.4.2  tls  * before the range allocator can be set-up, e.g. when taking over a firmware
    173  1.3.4.2  tls  * framebuffer.
    174  1.3.4.2  tls  *
    175  1.3.4.2  tls  * Returns:
    176  1.3.4.2  tls  * 0 on success, -ENOSPC if there's no hole where @node is.
    177  1.3.4.2  tls  */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	unsigned long end = node->start + node->size;
	unsigned long hole_start;
	unsigned long hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		/* [node->start, end) must lie entirely inside this hole. */
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		/* Reservation starts at the hole start: @hole no longer
		 * fronts a hole, so retire it from the hole stack. */
		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		/* Any space left after the reservation becomes a new hole
		 * fronted by @node. */
		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	/* No hole fully contains the requested range. */
	return -ENOSPC;
}
    214  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_reserve_node);
    215  1.3.4.2  tls 
    216  1.3.4.2  tls /**
    217  1.3.4.2  tls  * drm_mm_insert_node_generic - search for space and insert @node
    218  1.3.4.2  tls  * @mm: drm_mm to allocate from
    219  1.3.4.2  tls  * @node: preallocate node to insert
    220  1.3.4.2  tls  * @size: size of the allocation
    221  1.3.4.2  tls  * @alignment: alignment of the allocation
    222  1.3.4.2  tls  * @color: opaque tag value to use for this node
    223  1.3.4.2  tls  * @sflags: flags to fine-tune the allocation search
    224  1.3.4.2  tls  * @aflags: flags to fine-tune the allocation behavior
    225  1.3.4.2  tls  *
    226  1.3.4.2  tls  * The preallocated node must be cleared to 0.
    227  1.3.4.2  tls  *
    228  1.3.4.2  tls  * Returns:
    229  1.3.4.2  tls  * 0 on success, -ENOSPC if there's no suitable hole.
    230  1.3.4.2  tls  */
    231  1.3.4.2  tls int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
    232  1.3.4.2  tls 			       unsigned long size, unsigned alignment,
    233  1.3.4.2  tls 			       unsigned long color,
    234  1.3.4.2  tls 			       enum drm_mm_search_flags sflags,
    235  1.3.4.2  tls 			       enum drm_mm_allocator_flags aflags)
    236  1.3.4.2  tls {
    237  1.3.4.2  tls 	struct drm_mm_node *hole_node;
    238  1.3.4.2  tls 
    239  1.3.4.2  tls 	hole_node = drm_mm_search_free_generic(mm, size, alignment,
    240  1.3.4.2  tls 					       color, sflags);
    241  1.3.4.2  tls 	if (!hole_node)
    242  1.3.4.2  tls 		return -ENOSPC;
    243  1.3.4.2  tls 
    244  1.3.4.2  tls 	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
    245  1.3.4.2  tls 	return 0;
    246  1.3.4.2  tls }
    247  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_insert_node_generic);
    248  1.3.4.2  tls 
/*
 * Range-restricted variant of drm_mm_insert_helper(): carve @node out of
 * the hole following @hole_node, but keep the allocation inside
 * [@start, @end).  The caller has already verified the clamped hole is
 * large enough (see drm_mm_search_free_in_range_generic()).
 */
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	/* The hole must be real and @node must not already be allocated. */
	BUG_ON(!hole_node->hole_follows || node->allocated);

	/* Clamp the hole to the allowed [start, end) range. */
	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	/* Top-down: start as close to the end of the range as possible. */
	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	/* Let the driver shrink the usable range (e.g. for guard pages). */
	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp) {
			if (flags & DRM_MM_CREATE_TOP)
				/* Round down so the block still ends at or
				 * before adj_end. */
				adj_start -= tmp;
			else
				adj_start += alignment - tmp;
		}
	}

	/* Allocation begins flush with the hole: @hole_node no longer
	 * fronts a hole, so retire it from the hole stack. */
	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	/* The final placement must respect both the hole and the range. */
	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	/* If space remains between our end and the next node, @node now
	 * fronts a fresh hole; publish it on the LRU hole stack. */
	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}
    310  1.3.4.2  tls 
    311  1.3.4.2  tls /**
    312  1.3.4.2  tls  * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
    313  1.3.4.2  tls  * @mm: drm_mm to allocate from
    314  1.3.4.2  tls  * @node: preallocate node to insert
    315  1.3.4.2  tls  * @size: size of the allocation
    316  1.3.4.2  tls  * @alignment: alignment of the allocation
    317  1.3.4.2  tls  * @color: opaque tag value to use for this node
    318  1.3.4.2  tls  * @start: start of the allowed range for this node
    319  1.3.4.2  tls  * @end: end of the allowed range for this node
    320  1.3.4.2  tls  * @sflags: flags to fine-tune the allocation search
    321  1.3.4.2  tls  * @aflags: flags to fine-tune the allocation behavior
    322  1.3.4.2  tls  *
    323  1.3.4.2  tls  * The preallocated node must be cleared to 0.
    324  1.3.4.2  tls  *
    325  1.3.4.2  tls  * Returns:
    326  1.3.4.2  tls  * 0 on success, -ENOSPC if there's no suitable hole.
    327  1.3.4.2  tls  */
    328  1.3.4.2  tls int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
    329  1.3.4.2  tls 					unsigned long size, unsigned alignment,
    330  1.3.4.2  tls 					unsigned long color,
    331  1.3.4.2  tls 					unsigned long start, unsigned long end,
    332  1.3.4.2  tls 					enum drm_mm_search_flags sflags,
    333  1.3.4.2  tls 					enum drm_mm_allocator_flags aflags)
    334  1.3.4.2  tls {
    335  1.3.4.2  tls 	struct drm_mm_node *hole_node;
    336  1.3.4.2  tls 
    337  1.3.4.2  tls 	hole_node = drm_mm_search_free_in_range_generic(mm,
    338  1.3.4.2  tls 							size, alignment, color,
    339  1.3.4.2  tls 							start, end, sflags);
    340  1.3.4.2  tls 	if (!hole_node)
    341  1.3.4.2  tls 		return -ENOSPC;
    342  1.3.4.2  tls 
    343  1.3.4.2  tls 	drm_mm_insert_helper_range(hole_node, node,
    344  1.3.4.2  tls 				   size, alignment, color,
    345  1.3.4.2  tls 				   start, end, aflags);
    346  1.3.4.2  tls 	return 0;
    347  1.3.4.2  tls }
    348  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
    349  1.3.4.2  tls 
    350  1.3.4.2  tls /**
    351  1.3.4.2  tls  * drm_mm_remove_node - Remove a memory node from the allocator.
    352  1.3.4.2  tls  * @node: drm_mm_node to remove
    353  1.3.4.2  tls  *
    354  1.3.4.2  tls  * This just removes a node from its drm_mm allocator. The node does not need to
    355  1.3.4.2  tls  * be cleared again before it can be re-inserted into this or any other drm_mm
    356  1.3.4.2  tls  * allocator. It is a bug to call this function on a un-allocated node.
    357  1.3.4.2  tls  */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	/* Removing a node that was never allocated is a caller bug, but
	 * recoverable: warn and bail out. */
	if (WARN_ON(!node->allocated))
		return;

	/* Must not be part of an in-progress eviction scan. */
	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	/* Sanity-check the hole bookkeeping, then drop our trailing hole;
	 * it merges into the hole that prev_node will front below. */
	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));


	/* The space we vacate always leaves prev_node fronting a hole;
	 * (re-)publish it at the top of the recently-freed stack. */
	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
    390  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_remove_node);
    391  1.3.4.2  tls 
/*
 * Does the hole [start, end) have room for a @size allocation at
 * @alignment?  Returns non-zero if it fits, zero otherwise.
 */
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned long first = start;

	/* Quick reject: hole smaller than the request, alignment aside. */
	if (end - start < size)
		return 0;

	/* Round the candidate start up to the requested alignment. */
	if (alignment) {
		unsigned long rem = first % alignment;

		if (rem != 0)
			first += alignment - rem;
	}

	/* Fits iff the aligned block still ends within the hole. */
	return end >= first + size;
}
    406  1.3.4.2  tls 
/*
 * Walk the hole stack for a hole that can take a @size/@alignment/@color
 * allocation.  Returns the node fronting a suitable hole, or NULL.
 * First-fit by default; DRM_MM_SEARCH_BEST picks the smallest fitting
 * hole instead.
 */
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      unsigned long size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	/* Searching while an eviction scan is active is forbidden. */
	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	/* DRM_MM_SEARCH_BELOW walks the hole stack in reverse order. */
	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		unsigned long hole_size = adj_end - adj_start;

		/* Let the driver shrink the usable range; skip holes the
		 * adjustment collapses entirely. */
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		/* First fit unless the caller asked for best fit. */
		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		/* Best fit: remember the smallest hole that still fits. */
		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}
    448  1.3.4.2  tls 
/*
 * Like drm_mm_search_free_generic(), but only considers the part of each
 * hole that falls inside [@start, @end).  Returns the node fronting a
 * suitable hole, or NULL.
 */
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	/* Searching while an eviction scan is active is forbidden. */
	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	/* DRM_MM_SEARCH_BELOW walks the hole stack in reverse order. */
	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		unsigned long hole_size = adj_end - adj_start;

		/* Clamp the hole to the allowed [start, end) range. */
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		/* Let the driver shrink the usable range; skip holes the
		 * adjustment collapses entirely. */
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		/* First fit unless the caller asked for best fit. */
		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		/* Best fit: remember the smallest hole that still fits. */
		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}
    497  1.3.4.2  tls 
    498  1.3.4.2  tls /**
    499  1.3.4.2  tls  * drm_mm_replace_node - move an allocation from @old to @new
    500  1.3.4.2  tls  * @old: drm_mm_node to remove from the allocator
    501  1.3.4.2  tls  * @new: drm_mm_node which should inherit @old's allocation
    502  1.3.4.2  tls  *
    503  1.3.4.2  tls  * This is useful for when drivers embed the drm_mm_node structure and hence
    504  1.3.4.2  tls  * can't move allocations by reassigning pointers. It's a combination of remove
    505  1.3.4.2  tls  * and insert with the guarantee that the allocation start will match.
    506  1.3.4.2  tls  */
    507  1.3.4.2  tls void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
    508  1.3.4.2  tls {
    509  1.3.4.2  tls 	list_replace(&old->node_list, &new->node_list);
    510  1.3.4.2  tls 	list_replace(&old->hole_stack, &new->hole_stack);
    511  1.3.4.2  tls 	new->hole_follows = old->hole_follows;
    512  1.3.4.2  tls 	new->mm = old->mm;
    513  1.3.4.2  tls 	new->start = old->start;
    514  1.3.4.2  tls 	new->size = old->size;
    515  1.3.4.2  tls 	new->color = old->color;
    516  1.3.4.2  tls 
    517  1.3.4.2  tls 	old->allocated = 0;
    518  1.3.4.2  tls 	new->allocated = 1;
    519  1.3.4.2  tls }
    520  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_replace_node);
    521  1.3.4.2  tls 
    522  1.3.4.2  tls /**
     523  1.3.4.2  tls  * DOC: lru scan roster
    524  1.3.4.2  tls  *
    525  1.3.4.2  tls  * Very often GPUs need to have continuous allocations for a given object. When
    526  1.3.4.2  tls  * evicting objects to make space for a new one it is therefore not most
    527  1.3.4.2  tls  * efficient when we simply start to select all objects from the tail of an LRU
    528  1.3.4.2  tls  * until there's a suitable hole: Especially for big objects or nodes that
    529  1.3.4.2  tls  * otherwise have special allocation constraints there's a good chance we evict
     530  1.3.4.2  tls  * lots of (smaller) objects unnecessarily.
    531  1.3.4.2  tls  *
    532  1.3.4.2  tls  * The DRM range allocator supports this use-case through the scanning
    533  1.3.4.2  tls  * interfaces. First a scan operation needs to be initialized with
     534  1.3.4.2  tls  * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
     535  1.3.4.2  tls  * objects to the roster (probably by walking an LRU list, but this can be
     536  1.3.4.2  tls  * freely implemented) until a suitable hole is found or there's no further
     537  1.3.4.2  tls  * evictable object.
    538  1.3.4.2  tls  *
     539  1.3.4.2  tls  * Then the driver must walk through all objects again in exactly the reverse
    540  1.3.4.2  tls  * order to restore the allocator state. Note that while the allocator is used
    541  1.3.4.2  tls  * in the scan mode no other operation is allowed.
    542  1.3.4.2  tls  *
    543  1.3.4.2  tls  * Finally the driver evicts all objects selected in the scan. Adding and
    544  1.3.4.2  tls  * removing an object is O(1), and since freeing a node is also O(1) the overall
    545  1.3.4.2  tls  * complexity is O(scanned_objects). So like the free stack which needs to be
    546  1.3.4.2  tls  * walked before a scan operation even begins this is linear in the number of
    547  1.3.4.2  tls  * objects. It doesn't seem to hurt badly.
    548  1.3.4.2  tls  */
    549  1.3.4.2  tls 
    550  1.3.4.2  tls /**
    551  1.3.4.2  tls  * drm_mm_init_scan - initialize lru scanning
    552  1.3.4.2  tls  * @mm: drm_mm to scan
    553  1.3.4.2  tls  * @size: size of the allocation
    554  1.3.4.2  tls  * @alignment: alignment of the allocation
    555  1.3.4.2  tls  * @color: opaque tag value to use for the allocation
    556  1.3.4.2  tls  *
    557  1.3.4.2  tls  * This simply sets up the scanning routines with the parameters for the desired
    558  1.3.4.2  tls  * hole. Note that there's no need to specify allocation flags, since they only
    559  1.3.4.2  tls  * change the place a node is allocated from within a suitable hole.
    560  1.3.4.2  tls  *
    561  1.3.4.2  tls  * Warning:
    562  1.3.4.2  tls  * As long as the scan list is non-empty, no other operations than
    563  1.3.4.2  tls  * adding/removing nodes to/from the scan list are allowed.
    564  1.3.4.2  tls  */
    565  1.3.4.2  tls void drm_mm_init_scan(struct drm_mm *mm,
    566  1.3.4.2  tls 		      unsigned long size,
    567  1.3.4.2  tls 		      unsigned alignment,
    568  1.3.4.2  tls 		      unsigned long color)
    569  1.3.4.2  tls {
    570  1.3.4.2  tls 	mm->scan_color = color;
    571  1.3.4.2  tls 	mm->scan_alignment = alignment;
    572  1.3.4.2  tls 	mm->scan_size = size;
    573  1.3.4.2  tls 	mm->scanned_blocks = 0;
    574  1.3.4.2  tls 	mm->scan_hit_start = 0;
    575  1.3.4.2  tls 	mm->scan_hit_end = 0;
    576  1.3.4.2  tls 	mm->scan_check_range = 0;
    577  1.3.4.2  tls 	mm->prev_scanned_node = NULL;
    578  1.3.4.2  tls }
    579  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_init_scan);
    580  1.3.4.2  tls 
    581  1.3.4.2  tls /**
     582  1.3.4.2  tls  * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
    583  1.3.4.2  tls  * @mm: drm_mm to scan
    584  1.3.4.2  tls  * @size: size of the allocation
    585  1.3.4.2  tls  * @alignment: alignment of the allocation
    586  1.3.4.2  tls  * @color: opaque tag value to use for the allocation
    587  1.3.4.2  tls  * @start: start of the allowed range for the allocation
    588  1.3.4.2  tls  * @end: end of the allowed range for the allocation
    589  1.3.4.2  tls  *
    590  1.3.4.2  tls  * This simply sets up the scanning routines with the parameters for the desired
    591  1.3.4.2  tls  * hole. Note that there's no need to specify allocation flags, since they only
    592  1.3.4.2  tls  * change the place a node is allocated from within a suitable hole.
    593  1.3.4.2  tls  *
    594  1.3.4.2  tls  * Warning:
    595  1.3.4.2  tls  * As long as the scan list is non-empty, no other operations than
    596  1.3.4.2  tls  * adding/removing nodes to/from the scan list are allowed.
    597  1.3.4.2  tls  */
    598  1.3.4.2  tls void drm_mm_init_scan_with_range(struct drm_mm *mm,
    599  1.3.4.2  tls 				 unsigned long size,
    600  1.3.4.2  tls 				 unsigned alignment,
    601  1.3.4.2  tls 				 unsigned long color,
    602  1.3.4.2  tls 				 unsigned long start,
    603  1.3.4.2  tls 				 unsigned long end)
    604  1.3.4.2  tls {
    605  1.3.4.2  tls 	mm->scan_color = color;
    606  1.3.4.2  tls 	mm->scan_alignment = alignment;
    607  1.3.4.2  tls 	mm->scan_size = size;
    608  1.3.4.2  tls 	mm->scanned_blocks = 0;
    609  1.3.4.2  tls 	mm->scan_hit_start = 0;
    610  1.3.4.2  tls 	mm->scan_hit_end = 0;
    611  1.3.4.2  tls 	mm->scan_start = start;
    612  1.3.4.2  tls 	mm->scan_end = end;
    613  1.3.4.2  tls 	mm->scan_check_range = 1;
    614  1.3.4.2  tls 	mm->prev_scanned_node = NULL;
    615  1.3.4.2  tls }
    616  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_init_scan_with_range);
    617  1.3.4.2  tls 
    618  1.3.4.2  tls /**
    619  1.3.4.2  tls  * drm_mm_scan_add_block - add a node to the scan list
    620  1.3.4.2  tls  * @node: drm_mm_node to add
    621  1.3.4.2  tls  *
    622  1.3.4.2  tls  * Add a node to the scan list that might be freed to make space for the desired
    623  1.3.4.2  tls  * hole.
    624  1.3.4.2  tls  *
    625  1.3.4.2  tls  * Returns:
    626  1.3.4.2  tls  * True if a hole has been found, false otherwise.
    627  1.3.4.2  tls  */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	/* A node must not be put on the scan list twice. */
	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	/* Remember whether a hole already followed the predecessor so
	 * drm_mm_scan_remove_block() can restore it exactly. */
	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	/*
	 * Speculatively unlink the node so its space merges into the hole
	 * after prev_node. The now-dangling node_list pointers are reused
	 * as scratch storage: .prev keeps the original predecessor (needed
	 * to undo the removal later), .next chains this node onto the
	 * per-mm list of scanned nodes.
	 */
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	/* The candidate hole now spans from the end of prev_node to the
	 * start of the node after the removed one. */
	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	/* Clamp to the allowed range for range-restricted scans. */
	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	/* Let the driver shrink the usable span for coloring constraints. */
	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		/* Record the (unadjusted) hole so remove_block can tell
		 * which scanned nodes overlap it and must be evicted. */
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
    674  1.3.4.2  tls 
    675  1.3.4.2  tls /**
    676  1.3.4.2  tls  * drm_mm_scan_remove_block - remove a node from the scan list
    677  1.3.4.2  tls  * @node: drm_mm_node to remove
    678  1.3.4.2  tls  *
    679  1.3.4.2  tls  * Nodes _must_ be removed in the exact same order from the scan list as they
    680  1.3.4.2  tls  * have been added, otherwise the internal state of the memory manager will be
    681  1.3.4.2  tls  * corrupted.
    682  1.3.4.2  tls  *
    683  1.3.4.2  tls  * When the scan list is empty, the selected memory nodes can be freed. An
    684  1.3.4.2  tls  * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack list).
    686  1.3.4.2  tls  *
    687  1.3.4.2  tls  * Returns:
    688  1.3.4.2  tls  * True if this block should be evicted, false otherwise. Will always
    689  1.3.4.2  tls  * return false when no hole has been found.
    690  1.3.4.2  tls  */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	/* Only nodes previously added via drm_mm_scan_add_block() may be
	 * removed. */
	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	/* node->node_list.prev was stashed by drm_mm_scan_add_block() and
	 * still points at the node's original predecessor. */
	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	/* Undo the speculative removal: restore the predecessor's hole
	 * state and re-link the node at its old position. */
	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	/* Evict this block iff it overlaps the hole found by the scan. */
	 return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		 node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
    711  1.3.4.2  tls 
    712  1.3.4.2  tls /**
    713  1.3.4.2  tls  * drm_mm_clean - checks whether an allocator is clean
    714  1.3.4.2  tls  * @mm: drm_mm allocator to check
    715  1.3.4.2  tls  *
    716  1.3.4.2  tls  * Returns:
    717  1.3.4.2  tls  * True if the allocator is completely free, false if there's still a node
    718  1.3.4.2  tls  * allocated in it.
    719  1.3.4.2  tls  */
    720  1.3.4.2  tls bool drm_mm_clean(struct drm_mm * mm)
    721  1.3.4.2  tls {
    722  1.3.4.2  tls 	struct list_head *head = &mm->head_node.node_list;
    723  1.3.4.2  tls 
    724  1.3.4.2  tls 	return (head->next->next == head);
    725  1.3.4.2  tls }
    726  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_clean);
    727  1.3.4.2  tls 
    728  1.3.4.2  tls /**
    729  1.3.4.2  tls  * drm_mm_init - initialize a drm-mm allocator
    730  1.3.4.2  tls  * @mm: the drm_mm structure to initialize
    731  1.3.4.2  tls  * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
    733  1.3.4.2  tls  *
    734  1.3.4.2  tls  * Note that @mm must be cleared to 0 before calling this function.
    735  1.3.4.2  tls  */
    736  1.3.4.2  tls void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
    737  1.3.4.2  tls {
    738  1.3.4.2  tls 	INIT_LIST_HEAD(&mm->hole_stack);
    739  1.3.4.2  tls 	mm->scanned_blocks = 0;
    740  1.3.4.2  tls 
    741  1.3.4.2  tls 	/* Clever trick to avoid a special case in the free hole tracking. */
    742  1.3.4.2  tls 	INIT_LIST_HEAD(&mm->head_node.node_list);
    743  1.3.4.2  tls 	INIT_LIST_HEAD(&mm->head_node.hole_stack);
    744  1.3.4.2  tls 	mm->head_node.hole_follows = 1;
    745  1.3.4.2  tls 	mm->head_node.scanned_block = 0;
    746  1.3.4.2  tls 	mm->head_node.scanned_prev_free = 0;
    747  1.3.4.2  tls 	mm->head_node.scanned_next_free = 0;
    748  1.3.4.2  tls 	mm->head_node.mm = mm;
    749  1.3.4.2  tls 	mm->head_node.start = start + size;
    750  1.3.4.2  tls 	mm->head_node.size = start - mm->head_node.start;
    751  1.3.4.2  tls 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
    752  1.3.4.2  tls 
    753  1.3.4.2  tls 	mm->color_adjust = NULL;
    754  1.3.4.2  tls }
    755  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_init);
    756  1.3.4.2  tls 
    757  1.3.4.2  tls /**
    758  1.3.4.2  tls  * drm_mm_takedown - clean up a drm_mm allocator
    759  1.3.4.2  tls  * @mm: drm_mm allocator to clean up
    760  1.3.4.2  tls  *
    761  1.3.4.2  tls  * Note that it is a bug to call this function on an allocator which is not
    762  1.3.4.2  tls  * clean.
    763  1.3.4.2  tls  */
    764  1.3.4.2  tls void drm_mm_takedown(struct drm_mm * mm)
    765  1.3.4.2  tls {
    766  1.3.4.2  tls 	WARN(!list_empty(&mm->head_node.node_list),
    767  1.3.4.2  tls 	     "Memory manager not clean during takedown.\n");
    768  1.3.4.2  tls }
    769  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_takedown);
    770  1.3.4.2  tls 
    771  1.3.4.2  tls static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
    772  1.3.4.2  tls 				       const char *prefix)
    773  1.3.4.2  tls {
    774  1.3.4.2  tls 	unsigned long hole_start, hole_end, hole_size;
    775  1.3.4.2  tls 
    776  1.3.4.2  tls 	if (entry->hole_follows) {
    777  1.3.4.2  tls 		hole_start = drm_mm_hole_node_start(entry);
    778  1.3.4.2  tls 		hole_end = drm_mm_hole_node_end(entry);
    779  1.3.4.2  tls 		hole_size = hole_end - hole_start;
    780  1.3.4.2  tls 		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
    781  1.3.4.2  tls 			prefix, hole_start, hole_end,
    782  1.3.4.2  tls 			hole_size);
    783  1.3.4.2  tls 		return hole_size;
    784  1.3.4.2  tls 	}
    785  1.3.4.2  tls 
    786  1.3.4.2  tls 	return 0;
    787  1.3.4.2  tls }
    788  1.3.4.2  tls 
    789  1.3.4.2  tls /**
    790  1.3.4.2  tls  * drm_mm_debug_table - dump allocator state to dmesg
    791  1.3.4.2  tls  * @mm: drm_mm allocator to dump
    792  1.3.4.2  tls  * @prefix: prefix to use for dumping to dmesg
    793  1.3.4.2  tls  */
    794  1.3.4.2  tls void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
    795  1.3.4.2  tls {
    796  1.3.4.2  tls 	struct drm_mm_node *entry;
    797  1.3.4.2  tls 	unsigned long total_used = 0, total_free = 0, total = 0;
    798  1.3.4.2  tls 
    799  1.3.4.2  tls 	total_free += drm_mm_debug_hole(&mm->head_node, prefix);
    800  1.3.4.2  tls 
    801  1.3.4.2  tls 	drm_mm_for_each_node(entry, mm) {
    802  1.3.4.2  tls 		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
    803  1.3.4.2  tls 			prefix, entry->start, entry->start + entry->size,
    804  1.3.4.2  tls 			entry->size);
    805  1.3.4.2  tls 		total_used += entry->size;
    806  1.3.4.2  tls 		total_free += drm_mm_debug_hole(entry, prefix);
    807  1.3.4.2  tls 	}
    808  1.3.4.2  tls 	total = total_free + total_used;
    809  1.3.4.2  tls 
    810  1.3.4.2  tls 	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
    811  1.3.4.2  tls 		total_used, total_free);
    812  1.3.4.2  tls }
    813  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_debug_table);
    814  1.3.4.2  tls 
    815  1.3.4.2  tls #if defined(CONFIG_DEBUG_FS)
    816  1.3.4.2  tls static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
    817  1.3.4.2  tls {
    818  1.3.4.2  tls 	unsigned long hole_start, hole_end, hole_size;
    819  1.3.4.2  tls 
    820  1.3.4.2  tls 	if (entry->hole_follows) {
    821  1.3.4.2  tls 		hole_start = drm_mm_hole_node_start(entry);
    822  1.3.4.2  tls 		hole_end = drm_mm_hole_node_end(entry);
    823  1.3.4.2  tls 		hole_size = hole_end - hole_start;
    824  1.3.4.2  tls 		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
    825  1.3.4.2  tls 				hole_start, hole_end, hole_size);
    826  1.3.4.2  tls 		return hole_size;
    827  1.3.4.2  tls 	}
    828  1.3.4.2  tls 
    829  1.3.4.2  tls 	return 0;
    830  1.3.4.2  tls }
    831  1.3.4.2  tls 
    832  1.3.4.2  tls /**
    833  1.3.4.2  tls  * drm_mm_dump_table - dump allocator state to a seq_file
    834  1.3.4.2  tls  * @m: seq_file to dump to
    835  1.3.4.2  tls  * @mm: drm_mm allocator to dump
    836  1.3.4.2  tls  */
    837  1.3.4.2  tls int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
    838  1.3.4.2  tls {
    839  1.3.4.2  tls 	struct drm_mm_node *entry;
    840  1.3.4.2  tls 	unsigned long total_used = 0, total_free = 0, total = 0;
    841  1.3.4.2  tls 
    842  1.3.4.2  tls 	total_free += drm_mm_dump_hole(m, &mm->head_node);
    843  1.3.4.2  tls 
    844  1.3.4.2  tls 	drm_mm_for_each_node(entry, mm) {
    845  1.3.4.2  tls 		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
    846  1.3.4.2  tls 				entry->start, entry->start + entry->size,
    847  1.3.4.2  tls 				entry->size);
    848  1.3.4.2  tls 		total_used += entry->size;
    849  1.3.4.2  tls 		total_free += drm_mm_dump_hole(m, entry);
    850  1.3.4.2  tls 	}
    851  1.3.4.2  tls 	total = total_free + total_used;
    852  1.3.4.2  tls 
    853  1.3.4.2  tls 	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
    854  1.3.4.2  tls 	return 0;
    855  1.3.4.2  tls }
    856  1.3.4.2  tls EXPORT_SYMBOL(drm_mm_dump_table);
    857  1.3.4.2  tls #endif
    858