/*	$NetBSD: drm_mm.h,v 1.6 2021/12/18 23:45:46 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
#include <drm/drm_print.h>

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

/**
 * enum drm_mm_insert_mode - control search and allocation behaviour
 *
 * The &struct drm_mm range manager supports finding a suitable hole using a
 * number of search trees. These trees are organised by size, by address, and
 * by most recent eviction order. This allows the user to find either the
 * smallest hole to reuse, the lowest or highest address to reuse, or simply
 * reuse the most recent eviction that fits. When allocating the &drm_mm_node
 * from within the hole, the &drm_mm_insert_mode also dictates whether to
 * allocate the lowest matching address or the highest.
 */
enum drm_mm_insert_mode {
	/**
	 * @DRM_MM_INSERT_BEST:
	 *
	 * Search for the smallest hole (within the search range) that fits
	 * the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_BEST = 0,

	/**
	 * @DRM_MM_INSERT_LOW:
	 *
	 * Search for the lowest hole (address closest to 0, within the search
	 * range) that fits the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_LOW,

	/**
	 * @DRM_MM_INSERT_HIGH:
	 *
	 * Search for the highest hole (address closest to U64_MAX, within the
	 * search range) that fits the desired node.
	 *
	 * Allocates the node from the *top* of the found hole. The specified
	 * alignment for the node is applied to the base of the node
	 * (&drm_mm_node.start).
	 */
	DRM_MM_INSERT_HIGH,

	/**
	 * @DRM_MM_INSERT_EVICT:
	 *
	 * Search for the most recently evicted hole (within the search range)
	 * that fits the desired node. This is appropriate for use immediately
	 * after performing an eviction scan (see drm_mm_scan_init()) and
	 * removing the selected nodes to form a hole.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_EVICT,

	/**
	 * @DRM_MM_INSERT_ONCE:
	 *
	 * Only check the first hole for suitability and report -ENOSPC
	 * immediately otherwise, rather than check every hole until a
	 * suitable one is found. Can only be used in conjunction with another
	 * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
	 */
	DRM_MM_INSERT_ONCE = BIT(31),

	/**
	 * @DRM_MM_INSERT_HIGHEST:
	 *
	 * Only check the highest hole (the hole with the largest address) and
	 * insert the node at the top of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,

	/**
	 * @DRM_MM_INSERT_LOWEST:
	 *
	 * Only check the lowest hole (the hole with the smallest address) and
	 * insert the node at the bottom of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_LOWEST  = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
};
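
/*
 * Example (an illustrative sketch, not part of the API): a driver that wants
 * long-lived buffers packed away from short-lived ones might place them at
 * opposite ends of the managed range, e.g.
 *
 *	mode = long_lived ? DRM_MM_INSERT_LOW : DRM_MM_INSERT_HIGH;
 *	err = drm_mm_insert_node_in_range(mm, node, size, 0, 0,
 *					  0, U64_MAX, mode);
 */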

/**
 * struct drm_mm_node - allocated block in the DRM allocator
 *
 * This represents an allocated block in a &drm_mm allocator. Except for
 * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
 * entirely opaque and should only be accessed through the provided functions.
 * Since allocation of these nodes is entirely handled by the driver they can be
 * embedded.
 */
struct drm_mm_node {
	/** @color: Opaque driver-private tag. */
	unsigned long color;
	/** @start: Start address of the allocated block. */
	u64 start;
	/** @size: Size of the allocated block. */
	u64 size;
	/* private: */
	struct drm_mm *mm;
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	struct rb_node rb_hole_size;
	struct rb_node rb_hole_addr;
	u64 __subtree_last;
	u64 hole_size;
	unsigned long flags;
#define DRM_MM_NODE_ALLOCATED_BIT	0
#define DRM_MM_NODE_SCANNED_BIT		1
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};
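
/*
 * Since &struct drm_mm_node can be embedded, a driver object typically
 * carries its node directly. A minimal sketch (struct my_buffer is a
 * hypothetical driver type):
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// must be zeroed before use
 *		void *vaddr;
 *	};
 *
 *	// kzalloc() clears the node, as required before insertion.
 *	struct my_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 */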

/**
 * struct drm_mm - DRM allocator
 *
 * DRM range allocator with a few special functions and features geared towards
 * managing GPU memory. Except for the @color_adjust callback the structure is
 * entirely opaque and should only be accessed through the provided functions
 * and macros. This structure can be embedded into larger driver structures.
 */
struct drm_mm {
	/**
	 * @color_adjust:
	 *
	 * Optional driver callback to apply further restrictions on a hole.
	 * The node argument points at the node containing the hole from which
	 * the block would be allocated (see drm_mm_hole_follows() and
	 * friends). The other arguments are the color tag of the block to be
	 * allocated and in/out pointers to the start and end of the hole. The
	 * driver can adjust the start and end as needed to e.g. insert guard
	 * pages.
	 */
	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);

	/* private: */
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root_cached interval_tree;
	struct rb_root_cached holes_size;
	struct rb_root holes_addr;

	unsigned long scan_active;
};
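
/*
 * A sketch of a @color_adjust implementation that pads allocations of
 * differing "colors" with one guard page (the PAGE_SIZE padding policy is an
 * illustrative assumption, not part of the drm_mm contract):
 *
 *	static void my_color_adjust(const struct drm_mm_node *node,
 *				    unsigned long color,
 *				    u64 *start, u64 *end)
 *	{
 *		// @node precedes the hole; pad when its color differs.
 *		if (drm_mm_node_allocated(node) && node->color != color)
 *			*start += PAGE_SIZE;
 *	}
 */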

/**
 * struct drm_mm_scan - DRM allocator eviction roster data
 *
 * This structure tracks data needed for the eviction roster set up using
 * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
 * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
 * be accessed through the provided functions and macros. It is meant to be
 * allocated temporarily by the driver on the stack.
 */
struct drm_mm_scan {
	/* private: */
	struct drm_mm *mm;

	u64 size;
	u64 alignment;
	u64 remainder_mask;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	unsigned long color;
	enum drm_mm_insert_mode mode;
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

/**
 * drm_mm_hole_follows - checks whether a hole follows this node
 * @node: drm_mm_node to check
 *
 * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
 * If you wish to know whether a hole follows this particular node,
 * query this function. See also drm_mm_hole_node_start() and
 * drm_mm_hole_node_end().
 *
 * Returns:
 * True if a hole follows the @node.
 */
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
{
	return node->hole_size;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}


/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @next: &struct drm_mm_node to store the next step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), so it is safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
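
/*
 * A common use of the _safe variant is tearing everything down; a sketch,
 * assuming a driver wrapper around each node (my_buffer and my_buffer_free()
 * are hypothetical):
 *
 *	struct drm_mm_node *node, *next;
 *
 *	drm_mm_for_each_node_safe(node, next, mm) {
 *		drm_mm_remove_node(node);
 *		my_buffer_free(container_of(node, struct my_buffer, node));
 *	}
 *	drm_mm_takedown(mm);	// legal only once all nodes are removed
 */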

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @pos: &drm_mm_node used internally to track progress
 * @mm: &drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @pos is
 * used internally and will not reflect a real drm_mm_node for the very first
 * hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 */
#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
	for (pos = list_first_entry(&(mm)->hole_stack, \
				    typeof(*pos), hole_stack); \
	     &pos->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(pos), \
	     hole_end = hole_start + pos->hole_size, \
	     1 : 0; \
	     pos = list_next_entry(pos, hole_stack))

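/*
 * Holes are typically only walked for debugging. A sketch of a dumper (the
 * pr_info() format is illustrative):
 *
 *	struct drm_mm_node *pos;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(pos, mm, hole_start, hole_end)
 *		pr_info("hole [%llx, %llx): %llx bytes\n",
 *			hole_start, hole_end, hole_end - hole_start);
 */
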
/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
int drm_mm_insert_node_in_range(struct drm_mm *mm,
				struct drm_mm_node *node,
				u64 size,
				u64 alignment,
				unsigned long color,
				u64 start,
				u64 end,
				enum drm_mm_insert_mode mode);

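/*
 * drm_mm_reserve_node() claims a pre-determined range (node->start and
 * node->size filled in by the caller), e.g. to carve out a firmware-reserved
 * region. A sketch, with illustrative numbers:
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *
 *	node->start = 0;		// reserved region at the bottom
 *	node->size = SZ_1M;
 *	err = drm_mm_reserve_node(mm, node);	// -ENOSPC if range is in use
 */
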
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_insert_node_in_range() with no
 * range restrictions applied.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size, u64 alignment,
			   unsigned long color,
			   enum drm_mm_insert_mode mode)
{
	return drm_mm_insert_node_in_range(mm, node,
					   size, alignment, color,
					   0, U64_MAX, mode);
}

/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
}
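
/*
 * Putting the basics together; a minimal lifecycle sketch (the 4 GiB range
 * and 64 KiB allocation are illustrative choices):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};	// must be cleared before insertion
 *
 *	drm_mm_init(&mm, 0, SZ_4G);
 *	if (drm_mm_insert_node(&mm, &node, SZ_64K) == 0) {
 *		// node.start now holds the allocated offset
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);	// all nodes must be removed first
 */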

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so is not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
 * over the special _unallocated_ &drm_mm.head_node, and may even continue
 * indefinitely.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__->start < (end__);					\
	     node__ = list_next_entry(node__, node_list))

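/*
 * Range walks are handy for finding conflicts before reserving; a sketch
 * (the offsets are illustrative):
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, mm, SZ_1M, SZ_2M)
 *		pr_info("conflict: [%llx + %llx]\n", node->start, node->size);
 */
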
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size, u64 alignment, unsigned long color,
				 u64 start, u64 end,
				 enum drm_mm_insert_mode mode);

/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_scan_init_with_range() with no range
 * restrictions applied.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
				    struct drm_mm *mm,
				    u64 size,
				    u64 alignment,
				    unsigned long color,
				    enum drm_mm_insert_mode mode)
{
	drm_mm_scan_init_with_range(scan, mm,
				    size, alignment, color,
				    0, U64_MAX, mode);
}

bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);
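
/*
 * The eviction roster is used by walking an LRU, adding candidates until
 * drm_mm_scan_add_block() reports a fit, then removing every scanned block
 * in reverse order of addition and actually evicting only those for which
 * drm_mm_scan_remove_block() returns true. A condensed sketch (obj, lru,
 * evict_link and evict() are driver-side names used for illustration):
 *
 *	struct drm_mm_scan scan;
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_EVICT);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);  // reverse order
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;
 *	}
 *	// evict_list is in reverse order of addition: keep only the hits.
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);
 *	// Evict everything left on evict_list, then allocate with
 *	// DRM_MM_INSERT_EVICT to reuse the freshly formed hole.
 */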

void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);

#endif