/*	$NetBSD: drm_mm.h,v 1.7 2021/12/19 11:03:09 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
#include <drm/drm_print.h>

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

/**
 * enum drm_mm_insert_mode - control search and allocation behaviour
 *
 * The &struct drm_mm range manager supports finding a suitable hole using
 * a number of search trees. These trees are organised by size, by address
 * and in most recent eviction order. This allows the user to find either the
 * smallest hole to reuse, the lowest or highest address to reuse, or simply
 * reuse the most recent eviction that fits. When allocating the &drm_mm_node
 * from within the hole, the &drm_mm_insert_mode also dictates whether to
 * allocate the lowest matching address or the highest.
 */
enum drm_mm_insert_mode {
	/**
	 * @DRM_MM_INSERT_BEST:
	 *
	 * Search for the smallest hole (within the search range) that fits
	 * the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_BEST = 0,

	/**
	 * @DRM_MM_INSERT_LOW:
	 *
	 * Search for the lowest hole (address closest to 0, within the search
	 * range) that fits the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_LOW,

	/**
	 * @DRM_MM_INSERT_HIGH:
	 *
	 * Search for the highest hole (address closest to U64_MAX, within the
	 * search range) that fits the desired node.
	 *
	 * Allocates the node from the *top* of the found hole. The specified
	 * alignment for the node is applied to the base of the node
	 * (&drm_mm_node.start).
	 */
	DRM_MM_INSERT_HIGH,

	/**
	 * @DRM_MM_INSERT_EVICT:
	 *
	 * Search for the most recently evicted hole (within the search range)
	 * that fits the desired node. This is appropriate for use immediately
	 * after performing an eviction scan (see drm_mm_scan_init()) and
	 * removing the selected nodes to form a hole.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_EVICT,

	/**
	 * @DRM_MM_INSERT_ONCE:
	 *
	 * Only check the first hole for suitability and report -ENOSPC
	 * immediately otherwise, rather than check every hole until a
	 * suitable one is found. Can only be used in conjunction with another
	 * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
	 */
	DRM_MM_INSERT_ONCE = BIT(31),

	/**
	 * @DRM_MM_INSERT_HIGHEST:
	 *
	 * Only check the highest hole (the hole with the largest address) and
	 * insert the node at the top of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,

	/**
	 * @DRM_MM_INSERT_LOWEST:
	 *
	 * Only check the lowest hole (the hole with the smallest address) and
	 * insert the node at the bottom of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_LOWEST  = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
};
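
/*
 * Illustrative sketch (not part of this header): requesting a top-down
 * allocation with a mode flag.  The enclosing allocator `mm` and the 4096
 * byte size are hypothetical.  DRM_MM_INSERT_HIGH allocates from the top of
 * the highest suitable hole; using DRM_MM_INSERT_HIGHEST instead would
 * restrict the search to that single hole:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = drm_mm_insert_node_in_range(&mm, &node, 4096, 0, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_HIGH);
 *	if (err)
 *		return err;
 *
 * A failure here is -ENOSPC: no hole in [0, U64_MAX] was large enough.
 */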

/**
 * struct drm_mm_node - allocated block in the DRM allocator
 *
 * This represents an allocated block in a &drm_mm allocator. Except for
 * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
 * entirely opaque and should only be accessed through the provided functions.
 * Since allocation of these nodes is entirely handled by the driver they can
 * be embedded.
 */
struct drm_mm_node {
	/** @color: Opaque driver-private tag. */
	unsigned long color;
	/** @start: Start address of the allocated block. */
	u64 start;
	/** @size: Size of the allocated block. */
	u64 size;
	/* private: */
	struct drm_mm *mm;
	struct list_head node_list;
	struct list_head hole_stack;
#ifndef __NetBSD__		/* XXX interval tree */
	struct rb_node rb;
#endif
	struct rb_node rb_hole_size;
	struct rb_node rb_hole_addr;
	u64 __subtree_last;
	u64 hole_size;
	unsigned long flags;
#define DRM_MM_NODE_ALLOCATED_BIT	0
#define DRM_MM_NODE_SCANNED_BIT		1
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};

/**
 * struct drm_mm - DRM allocator
 *
 * DRM range allocator with a few special functions and features geared towards
 * managing GPU memory. Except for the @color_adjust callback the structure is
 * entirely opaque and should only be accessed through the provided functions
 * and macros. This structure can be embedded into larger driver structures.
 */
struct drm_mm {
	/**
	 * @color_adjust:
	 *
	 * Optional driver callback to further apply restrictions on a hole.
	 * The node argument points at the node containing the hole from which
	 * the block would be allocated (see drm_mm_hole_follows() and
	 * friends). The remaining arguments are the color tag of the block to
	 * be allocated and the start and end of the hole, which the driver
	 * can adjust as needed to e.g. insert guard pages.
	 */
	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);

	/* private: */
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
#ifndef __NetBSD__		/* XXX interval tree */
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root_cached interval_tree;
#endif
	struct rb_root_cached holes_size;
	struct rb_root holes_addr;

	unsigned long scan_active;
};
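
/*
 * Illustrative @color_adjust sketch (hypothetical driver code, modelled on
 * the guard-page trick some drivers use): keep a 4096 byte gap between
 * blocks whose color differs from the block being allocated.  Note that it
 * peeks at the otherwise private node_list to reach the node on the far
 * side of the hole:
 *
 *	static void guard_color_adjust(const struct drm_mm_node *node,
 *				       unsigned long color,
 *				       u64 *start, u64 *end)
 *	{
 *		if (drm_mm_node_allocated(node) && node->color != color)
 *			*start += 4096;
 *
 *		node = list_next_entry(node, node_list);
 *		if (drm_mm_node_allocated(node) && node->color != color)
 *			*end -= 4096;
 *	}
 */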

/**
 * struct drm_mm_scan - DRM allocator eviction roster data
 *
 * This structure tracks data needed for the eviction roster set up using
 * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
 * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
 * be accessed through the provided functions and macros. It is meant to be
 * allocated temporarily by the driver on the stack.
 */
struct drm_mm_scan {
	/* private: */
	struct drm_mm *mm;

	u64 size;
	u64 alignment;
	u64 remainder_mask;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	unsigned long color;
	enum drm_mm_insert_mode mode;
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
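
/*
 * Illustrative sketch: because a zeroed node reads as unallocated, cleanup
 * paths can use this check to tear down an optional allocation (`obj` is a
 * hypothetical driver object embedding a drm_mm_node):
 *
 *	if (drm_mm_node_allocated(&obj->node))
 *		drm_mm_remove_node(&obj->node);
 */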

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

/**
 * drm_mm_hole_follows - checks whether a hole follows this node
 * @node: drm_mm_node to check
 *
 * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
 * If you wish to know whether a hole follows this particular node,
 * query this function. See also drm_mm_hole_node_start() and
 * drm_mm_hole_node_end().
 *
 * Returns:
 * True if a hole follows the @node.
 */
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
{
	return node->hole_size;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @next: &struct drm_mm_node to store the next step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), so it is safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
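
/*
 * Illustrative sketch: draining an allocator before teardown.  All nodes
 * must be removed before drm_mm_takedown(), and the _safe variant permits
 * removal while walking the list (`mm` is hypothetical):
 *
 *	struct drm_mm_node *node, *next;
 *
 *	drm_mm_for_each_node_safe(node, next, &mm)
 *		drm_mm_remove_node(node);
 *	drm_mm_takedown(&mm);
 */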

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @pos: &drm_mm_node used internally to track progress
 * @mm: &drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @pos is
 * used internally and will not reflect a real drm_mm_node for the very first
 * hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 */
#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
	for (pos = list_first_entry(&(mm)->hole_stack, \
				    typeof(*pos), hole_stack); \
	     &pos->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(pos), \
	     hole_end = hole_start + pos->hole_size, \
	     1 : 0; \
	     pos = list_next_entry(pos, hole_stack))
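
/*
 * Illustrative sketch: a debug dump of every hole.  @hole_start and
 * @hole_end receive the bounds on each iteration, while `pos` itself must
 * not be dereferenced, as noted above:
 *
 *	struct drm_mm_node *pos;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(pos, &mm, hole_start, hole_end)
 *		printk(KERN_DEBUG "hole [%llx, %llx): %llu bytes\n",
 *		       hole_start, hole_end, hole_end - hole_start);
 */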

/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
int drm_mm_insert_node_in_range(struct drm_mm *mm,
				struct drm_mm_node *node,
				u64 size,
				u64 alignment,
				unsigned long color,
				u64 start,
				u64 end,
				enum drm_mm_insert_mode mode);

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_insert_node_in_range() with no
 * range restrictions applied.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size, u64 alignment,
			   unsigned long color,
			   enum drm_mm_insert_mode mode)
{
	return drm_mm_insert_node_in_range(mm, node,
					   size, alignment, color,
					   0, U64_MAX, mode);
}

/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
}
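
/*
 * Illustrative sketch: a minimal allocate/free cycle.  The node must be
 * zeroed before first use; the 1 MiB range and 4096 byte size are
 * hypothetical:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&mm, 0, 1 << 20);
 *	err = drm_mm_insert_node(&mm, &node, 4096);
 *	if (!err) {
 *		use(node.start, node.size);
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);
 *
 * Here use() stands in for whatever the driver does with the range.
 */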

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}
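
/*
 * Illustrative sketch: warning about leaked nodes before teardown, since
 * drm_mm_takedown() requires an empty allocator:
 *
 *	if (!drm_mm_clean(&mm))
 *		DRM_ERROR("drm_mm not clean at takedown\n");
 */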

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so is not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
 * over the special _unallocated_ &drm_mm.head_node, and may even continue
 * indefinitely.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__->start < (end__);					\
	     node__ = list_next_entry(node__, node_list))
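
/*
 * Illustrative sketch: listing the nodes that overlap a hypothetical
 * [0x10000, 0x20000) window of the allocator:
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, &mm, 0x10000, 0x20000)
 *		printk(KERN_DEBUG "node [%llx + %llx]\n",
 *		       node->start, node->size);
 */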

void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size, u64 alignment, unsigned long color,
				 u64 start, u64 end,
				 enum drm_mm_insert_mode mode);

/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_scan_init_with_range() with no range
 * restrictions applied.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
				    struct drm_mm *mm,
				    u64 size,
				    u64 alignment,
				    unsigned long color,
				    enum drm_mm_insert_mode mode)
{
	drm_mm_scan_init_with_range(scan, mm,
				    size, alignment, color,
				    0, U64_MAX, mode);
}

bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);
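
/*
 * Illustrative sketch of the intended eviction flow, with a hypothetical
 * `struct drm_obj` that embeds a drm_mm_node and sits on a driver LRU list.
 * Candidates are fed to the scan until it reports that a large-enough hole
 * can be assembled; every candidate must then be handed back via
 * drm_mm_scan_remove_block() in the reverse order of addition (hence the
 * list_add() below), and only those for which it returns true overlap the
 * chosen hole and actually need to be evicted:
 *
 *	struct drm_mm_scan scan;
 *	struct drm_obj *obj, *tmp;
 *	LIST_HEAD(evict);
 *
 *	drm_mm_scan_init(&scan, &mm, size, 0, 0, DRM_MM_INSERT_EVICT);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			goto found;
 *	}
 *	(no hole can be assembled: unwind by calling
 *	 drm_mm_scan_remove_block() on every entry of the evict list,
 *	 then give up)
 *
 * found:
 *	list_for_each_entry_safe(obj, tmp, &evict, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);
 *	}
 *	(objects still on the evict list must now be evicted, i.e. their
 *	 backing store freed and drm_mm_remove_node() called, after which
 *	 an insert with DRM_MM_INSERT_EVICT should find the new hole)
 */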

void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);

#endif