1 1.7 riastrad /* $NetBSD: drm_mm.h,v 1.7 2021/12/19 11:03:09 riastradh Exp $ */ 2 1.5 riastrad 3 1.1 riastrad /************************************************************************** 4 1.1 riastrad * 5 1.1 riastrad * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. 6 1.6 riastrad * Copyright 2016 Intel Corporation 7 1.1 riastrad * All Rights Reserved. 8 1.1 riastrad * 9 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 10 1.1 riastrad * copy of this software and associated documentation files (the 11 1.1 riastrad * "Software"), to deal in the Software without restriction, including 12 1.1 riastrad * without limitation the rights to use, copy, modify, merge, publish, 13 1.1 riastrad * distribute, sub license, and/or sell copies of the Software, and to 14 1.1 riastrad * permit persons to whom the Software is furnished to do so, subject to 15 1.1 riastrad * the following conditions: 16 1.1 riastrad * 17 1.1 riastrad * The above copyright notice and this permission notice (including the 18 1.1 riastrad * next paragraph) shall be included in all copies or substantial portions 19 1.1 riastrad * of the Software. 20 1.1 riastrad * 21 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 22 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 23 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 24 1.1 riastrad * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 25 1.1 riastrad * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 26 1.1 riastrad * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 27 1.1 riastrad * USE OR OTHER DEALINGS IN THE SOFTWARE. 28 1.1 riastrad * 29 1.1 riastrad * 30 1.1 riastrad **************************************************************************/ 31 1.1 riastrad /* 32 1.1 riastrad * Authors: 33 1.1 riastrad * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> 34 1.1 riastrad */ 35 1.1 riastrad 36 1.1 riastrad #ifndef _DRM_MM_H_ 37 1.1 riastrad #define _DRM_MM_H_ 38 1.1 riastrad 39 1.1 riastrad /* 40 1.1 riastrad * Generic range manager structs 41 1.1 riastrad */ 42 1.4 riastrad #include <linux/bug.h> 43 1.6 riastrad #include <linux/rbtree.h> 44 1.4 riastrad #include <linux/kernel.h> 45 1.6 riastrad #include <linux/mm_types.h> 46 1.1 riastrad #include <linux/list.h> 47 1.4 riastrad #include <linux/spinlock.h> 48 1.6 riastrad #ifdef CONFIG_DRM_DEBUG_MM 49 1.6 riastrad #include <linux/stackdepot.h> 50 1.1 riastrad #endif 51 1.6 riastrad #include <drm/drm_print.h> 52 1.1 riastrad 53 1.6 riastrad #ifdef CONFIG_DRM_DEBUG_MM 54 1.6 riastrad #define DRM_MM_BUG_ON(expr) BUG_ON(expr) 55 1.6 riastrad #else 56 1.6 riastrad #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) 57 1.6 riastrad #endif 58 1.4 riastrad 59 1.6 riastrad /** 60 1.6 riastrad * enum drm_mm_insert_mode - control search and allocation behaviour 61 1.6 riastrad * 62 1.6 riastrad * The &struct drm_mm range manager supports finding a suitable modes using 63 1.6 riastrad * a number of search trees. These trees are oranised by size, by address and 64 1.6 riastrad * in most recent eviction order. This allows the user to find either the 65 1.6 riastrad * smallest hole to reuse, the lowest or highest address to reuse, or simply 66 1.6 riastrad * reuse the most recent eviction that fits. When allocating the &drm_mm_node 67 1.6 riastrad * from within the hole, the &drm_mm_insert_mode also dictate whether to 68 1.6 riastrad * allocate the lowest matching address or the highest. 69 1.6 riastrad */ 70 1.6 riastrad enum drm_mm_insert_mode { 71 1.6 riastrad /** 72 1.6 riastrad * @DRM_MM_INSERT_BEST: 73 1.6 riastrad * 74 1.6 riastrad * Search for the smallest hole (within the search range) that fits 75 1.6 riastrad * the desired node. 76 1.6 riastrad * 77 1.6 riastrad * Allocates the node from the bottom of the found hole. 78 1.6 riastrad */ 79 1.6 riastrad DRM_MM_INSERT_BEST = 0, 80 1.6 riastrad 81 1.6 riastrad /** 82 1.6 riastrad * @DRM_MM_INSERT_LOW: 83 1.6 riastrad * 84 1.6 riastrad * Search for the lowest hole (address closest to 0, within the search 85 1.6 riastrad * range) that fits the desired node. 86 1.6 riastrad * 87 1.6 riastrad * Allocates the node from the bottom of the found hole. 88 1.6 riastrad */ 89 1.6 riastrad DRM_MM_INSERT_LOW, 90 1.6 riastrad 91 1.6 riastrad /** 92 1.6 riastrad * @DRM_MM_INSERT_HIGH: 93 1.6 riastrad * 94 1.6 riastrad * Search for the highest hole (address closest to U64_MAX, within the 95 1.6 riastrad * search range) that fits the desired node. 96 1.6 riastrad * 97 1.6 riastrad * Allocates the node from the *top* of the found hole. The specified 98 1.6 riastrad * alignment for the node is applied to the base of the node 99 1.6 riastrad * (&drm_mm_node.start). 100 1.6 riastrad */ 101 1.6 riastrad DRM_MM_INSERT_HIGH, 102 1.6 riastrad 103 1.6 riastrad /** 104 1.6 riastrad * @DRM_MM_INSERT_EVICT: 105 1.6 riastrad * 106 1.6 riastrad * Search for the most recently evicted hole (within the search range) 107 1.6 riastrad * that fits the desired node. This is appropriate for use immediately 108 1.6 riastrad * after performing an eviction scan (see drm_mm_scan_init()) and 109 1.6 riastrad * removing the selected nodes to form a hole. 110 1.6 riastrad * 111 1.6 riastrad * Allocates the node from the bottom of the found hole. 112 1.6 riastrad */ 113 1.6 riastrad DRM_MM_INSERT_EVICT, 114 1.6 riastrad 115 1.6 riastrad /** 116 1.6 riastrad * @DRM_MM_INSERT_ONCE: 117 1.6 riastrad * 118 1.6 riastrad * Only check the first hole for suitablity and report -ENOSPC 119 1.6 riastrad * immediately otherwise, rather than check every hole until a 120 1.6 riastrad * suitable one is found. Can only be used in conjunction with another 121 1.6 riastrad * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW. 122 1.6 riastrad */ 123 1.6 riastrad DRM_MM_INSERT_ONCE = BIT(31), 124 1.6 riastrad 125 1.6 riastrad /** 126 1.6 riastrad * @DRM_MM_INSERT_HIGHEST: 127 1.6 riastrad * 128 1.6 riastrad * Only check the highest hole (the hole with the largest address) and 129 1.6 riastrad * insert the node at the top of the hole or report -ENOSPC if 130 1.6 riastrad * unsuitable. 131 1.6 riastrad * 132 1.6 riastrad * Does not search all holes. 133 1.6 riastrad */ 134 1.6 riastrad DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE, 135 1.6 riastrad 136 1.6 riastrad /** 137 1.6 riastrad * @DRM_MM_INSERT_LOWEST: 138 1.6 riastrad * 139 1.6 riastrad * Only check the lowest hole (the hole with the smallest address) and 140 1.6 riastrad * insert the node at the bottom of the hole or report -ENOSPC if 141 1.6 riastrad * unsuitable. 142 1.6 riastrad * 143 1.6 riastrad * Does not search all holes. 144 1.6 riastrad */ 145 1.6 riastrad DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE, 146 1.4 riastrad }; 147 1.4 riastrad 148 1.6 riastrad /** 149 1.6 riastrad * struct drm_mm_node - allocated block in the DRM allocator 150 1.6 riastrad * 151 1.6 riastrad * This represents an allocated block in a &drm_mm allocator. Except for 152 1.6 riastrad * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is 153 1.6 riastrad * entirely opaque and should only be accessed through the provided funcions. 154 1.6 riastrad * Since allocation of these nodes is entirely handled by the driver they can be 155 1.6 riastrad * embedded. 156 1.6 riastrad */ 157 1.1 riastrad struct drm_mm_node { 158 1.6 riastrad /** @color: Opaque driver-private tag. */ 159 1.1 riastrad unsigned long color; 160 1.6 riastrad /** @start: Start address of the allocated block. */ 161 1.5 riastrad u64 start; 162 1.6 riastrad /** @size: Size of the allocated block. */ 163 1.5 riastrad u64 size; 164 1.6 riastrad /* private: */ 165 1.1 riastrad struct drm_mm *mm; 166 1.6 riastrad struct list_head node_list; 167 1.6 riastrad struct list_head hole_stack; 168 1.7 riastrad #ifndef __NetBSD__ /* XXX interval tree */ 169 1.6 riastrad struct rb_node rb; 170 1.7 riastrad #endif 171 1.6 riastrad struct rb_node rb_hole_size; 172 1.6 riastrad struct rb_node rb_hole_addr; 173 1.6 riastrad u64 __subtree_last; 174 1.6 riastrad u64 hole_size; 175 1.6 riastrad unsigned long flags; 176 1.6 riastrad #define DRM_MM_NODE_ALLOCATED_BIT 0 177 1.6 riastrad #define DRM_MM_NODE_SCANNED_BIT 1 178 1.6 riastrad #ifdef CONFIG_DRM_DEBUG_MM 179 1.6 riastrad depot_stack_handle_t stack; 180 1.6 riastrad #endif 181 1.1 riastrad }; 182 1.1 riastrad 183 1.6 riastrad /** 184 1.6 riastrad * struct drm_mm - DRM allocator 185 1.6 riastrad * 186 1.6 riastrad * DRM range allocator with a few special functions and features geared towards 187 1.6 riastrad * managing GPU memory. Except for the @color_adjust callback the structure is 188 1.6 riastrad * entirely opaque and should only be accessed through the provided functions 189 1.6 riastrad * and macros. This structure can be embedded into larger driver structures. 190 1.6 riastrad */ 191 1.1 riastrad struct drm_mm { 192 1.6 riastrad /** 193 1.6 riastrad * @color_adjust: 194 1.6 riastrad * 195 1.6 riastrad * Optional driver callback to further apply restrictions on a hole. The 196 1.6 riastrad * node argument points at the node containing the hole from which the 197 1.6 riastrad * block would be allocated (see drm_mm_hole_follows() and friends). The 198 1.6 riastrad * other arguments are the size of the block to be allocated. The driver 199 1.6 riastrad * can adjust the start and end as needed to e.g. insert guard pages. 200 1.6 riastrad */ 201 1.6 riastrad void (*color_adjust)(const struct drm_mm_node *node, 202 1.6 riastrad unsigned long color, 203 1.6 riastrad u64 *start, u64 *end); 204 1.6 riastrad 205 1.6 riastrad /* private: */ 206 1.1 riastrad /* List of all memory nodes that immediately precede a free hole. */ 207 1.1 riastrad struct list_head hole_stack; 208 1.1 riastrad /* head_node.node_list is the list of all memory nodes, ordered 209 1.1 riastrad * according to the (increasing) start address of the memory node. */ 210 1.1 riastrad struct drm_mm_node head_node; 211 1.7 riastrad #ifndef __NetBSD__ /* XXX interval tree */ 212 1.6 riastrad /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */ 213 1.6 riastrad struct rb_root_cached interval_tree; 214 1.7 riastrad #endif 215 1.6 riastrad struct rb_root_cached holes_size; 216 1.6 riastrad struct rb_root holes_addr; 217 1.6 riastrad 218 1.6 riastrad unsigned long scan_active; 219 1.6 riastrad }; 220 1.1 riastrad 221 1.6 riastrad /** 222 1.6 riastrad * struct drm_mm_scan - DRM allocator eviction roaster data 223 1.6 riastrad * 224 1.6 riastrad * This structure tracks data needed for the eviction roaster set up using 225 1.6 riastrad * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and 226 1.6 riastrad * drm_mm_scan_remove_block(). The structure is entirely opaque and should only 227 1.6 riastrad * be accessed through the provided functions and macros. It is meant to be 228 1.6 riastrad * allocated temporarily by the driver on the stack. 229 1.6 riastrad */ 230 1.6 riastrad struct drm_mm_scan { 231 1.6 riastrad /* private: */ 232 1.6 riastrad struct drm_mm *mm; 233 1.6 riastrad 234 1.6 riastrad u64 size; 235 1.6 riastrad u64 alignment; 236 1.6 riastrad u64 remainder_mask; 237 1.6 riastrad 238 1.6 riastrad u64 range_start; 239 1.6 riastrad u64 range_end; 240 1.6 riastrad 241 1.6 riastrad u64 hit_start; 242 1.6 riastrad u64 hit_end; 243 1.6 riastrad 244 1.6 riastrad unsigned long color; 245 1.6 riastrad enum drm_mm_insert_mode mode; 246 1.1 riastrad }; 247 1.1 riastrad 248 1.4 riastrad /** 249 1.4 riastrad * drm_mm_node_allocated - checks whether a node is allocated 250 1.4 riastrad * @node: drm_mm_node to check 251 1.4 riastrad * 252 1.6 riastrad * Drivers are required to clear a node prior to using it with the 253 1.6 riastrad * drm_mm range manager. 254 1.6 riastrad * 255 1.6 riastrad * Drivers should use this helper for proper encapsulation of drm_mm 256 1.4 riastrad * internals. 257 1.4 riastrad * 258 1.4 riastrad * Returns: 259 1.4 riastrad * True if the @node is allocated. 260 1.4 riastrad */ 261 1.6 riastrad static inline bool drm_mm_node_allocated(const struct drm_mm_node *node) 262 1.1 riastrad { 263 1.6 riastrad return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); 264 1.1 riastrad } 265 1.1 riastrad 266 1.4 riastrad /** 267 1.4 riastrad * drm_mm_initialized - checks whether an allocator is initialized 268 1.4 riastrad * @mm: drm_mm to check 269 1.4 riastrad * 270 1.6 riastrad * Drivers should clear the struct drm_mm prior to initialisation if they 271 1.6 riastrad * want to use this function. 272 1.6 riastrad * 273 1.6 riastrad * Drivers should use this helper for proper encapsulation of drm_mm 274 1.4 riastrad * internals. 275 1.4 riastrad * 276 1.4 riastrad * Returns: 277 1.4 riastrad * True if the @mm is initialized. 278 1.4 riastrad */ 279 1.6 riastrad static inline bool drm_mm_initialized(const struct drm_mm *mm) 280 1.1 riastrad { 281 1.1 riastrad return mm->hole_stack.next; 282 1.1 riastrad } 283 1.4 riastrad 284 1.6 riastrad /** 285 1.6 riastrad * drm_mm_hole_follows - checks whether a hole follows this node 286 1.6 riastrad * @node: drm_mm_node to check 287 1.6 riastrad * 288 1.6 riastrad * Holes are embedded into the drm_mm using the tail of a drm_mm_node. 289 1.6 riastrad * If you wish to know whether a hole follows this particular node, 290 1.6 riastrad * query this function. See also drm_mm_hole_node_start() and 291 1.6 riastrad * drm_mm_hole_node_end(). 292 1.6 riastrad * 293 1.6 riastrad * Returns: 294 1.6 riastrad * True if a hole follows the @node. 295 1.6 riastrad */ 296 1.6 riastrad static inline bool drm_mm_hole_follows(const struct drm_mm_node *node) 297 1.6 riastrad { 298 1.6 riastrad return node->hole_size; 299 1.6 riastrad } 300 1.6 riastrad 301 1.6 riastrad static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) 302 1.4 riastrad { 303 1.4 riastrad return hole_node->start + hole_node->size; 304 1.4 riastrad } 305 1.4 riastrad 306 1.4 riastrad /** 307 1.4 riastrad * drm_mm_hole_node_start - computes the start of the hole following @node 308 1.4 riastrad * @hole_node: drm_mm_node which implicitly tracks the following hole 309 1.4 riastrad * 310 1.6 riastrad * This is useful for driver-specific debug dumpers. Otherwise drivers should 311 1.6 riastrad * not inspect holes themselves. Drivers must check first whether a hole indeed 312 1.6 riastrad * follows by looking at drm_mm_hole_follows() 313 1.4 riastrad * 314 1.4 riastrad * Returns: 315 1.4 riastrad * Start of the subsequent hole. 316 1.4 riastrad */ 317 1.6 riastrad static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node) 318 1.4 riastrad { 319 1.6 riastrad DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node)); 320 1.4 riastrad return __drm_mm_hole_node_start(hole_node); 321 1.4 riastrad } 322 1.4 riastrad 323 1.6 riastrad static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) 324 1.4 riastrad { 325 1.6 riastrad return list_next_entry(hole_node, node_list)->start; 326 1.4 riastrad } 327 1.4 riastrad 328 1.4 riastrad /** 329 1.4 riastrad * drm_mm_hole_node_end - computes the end of the hole following @node 330 1.4 riastrad * @hole_node: drm_mm_node which implicitly tracks the following hole 331 1.4 riastrad * 332 1.6 riastrad * This is useful for driver-specific debug dumpers. Otherwise drivers should 333 1.6 riastrad * not inspect holes themselves. Drivers must check first whether a hole indeed 334 1.6 riastrad * follows by looking at drm_mm_hole_follows(). 335 1.4 riastrad * 336 1.4 riastrad * Returns: 337 1.4 riastrad * End of the subsequent hole. 338 1.4 riastrad */ 339 1.6 riastrad static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) 340 1.4 riastrad { 341 1.4 riastrad return __drm_mm_hole_node_end(hole_node); 342 1.4 riastrad } 343 1.4 riastrad 344 1.4 riastrad /** 345 1.6 riastrad * drm_mm_nodes - list of nodes under the drm_mm range manager 346 1.6 riastrad * @mm: the struct drm_mm range manger 347 1.6 riastrad * 348 1.6 riastrad * As the drm_mm range manager hides its node_list deep with its 349 1.6 riastrad * structure, extracting it looks painful and repetitive. This is 350 1.6 riastrad * not expected to be used outside of the drm_mm_for_each_node() 351 1.6 riastrad * macros and similar internal functions. 352 1.6 riastrad * 353 1.6 riastrad * Returns: 354 1.6 riastrad * The node list, may be empty. 355 1.6 riastrad */ 356 1.6 riastrad #define drm_mm_nodes(mm) (&(mm)->head_node.node_list) 357 1.6 riastrad 358 1.6 riastrad /** 359 1.4 riastrad * drm_mm_for_each_node - iterator to walk over all allocated nodes 360 1.6 riastrad * @entry: &struct drm_mm_node to assign to in each iteration step 361 1.6 riastrad * @mm: &drm_mm allocator to walk 362 1.6 riastrad * 363 1.6 riastrad * This iterator walks over all nodes in the range allocator. It is implemented 364 1.6 riastrad * with list_for_each(), so not save against removal of elements. 365 1.6 riastrad */ 366 1.6 riastrad #define drm_mm_for_each_node(entry, mm) \ 367 1.6 riastrad list_for_each_entry(entry, drm_mm_nodes(mm), node_list) 368 1.6 riastrad 369 1.6 riastrad /** 370 1.6 riastrad * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes 371 1.6 riastrad * @entry: &struct drm_mm_node to assign to in each iteration step 372 1.6 riastrad * @next: &struct drm_mm_node to store the next step 373 1.6 riastrad * @mm: &drm_mm allocator to walk 374 1.4 riastrad * 375 1.4 riastrad * This iterator walks over all nodes in the range allocator. It is implemented 376 1.6 riastrad * with list_for_each_safe(), so save against removal of elements. 377 1.4 riastrad */ 378 1.6 riastrad #define drm_mm_for_each_node_safe(entry, next, mm) \ 379 1.6 riastrad list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list) 380 1.4 riastrad 381 1.4 riastrad /** 382 1.4 riastrad * drm_mm_for_each_hole - iterator to walk over all holes 383 1.6 riastrad * @pos: &drm_mm_node used internally to track progress 384 1.6 riastrad * @mm: &drm_mm allocator to walk 385 1.4 riastrad * @hole_start: ulong variable to assign the hole start to on each iteration 386 1.4 riastrad * @hole_end: ulong variable to assign the hole end to on each iteration 387 1.4 riastrad * 388 1.4 riastrad * This iterator walks over all holes in the range allocator. It is implemented 389 1.6 riastrad * with list_for_each(), so not save against removal of elements. @entry is used 390 1.4 riastrad * internally and will not reflect a real drm_mm_node for the very first hole. 391 1.4 riastrad * Hence users of this iterator may not access it. 392 1.4 riastrad * 393 1.4 riastrad * Implementation Note: 394 1.4 riastrad * We need to inline list_for_each_entry in order to be able to set hole_start 395 1.4 riastrad * and hole_end on each iteration while keeping the macro sane. 396 1.4 riastrad */ 397 1.6 riastrad #define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \ 398 1.6 riastrad for (pos = list_first_entry(&(mm)->hole_stack, \ 399 1.6 riastrad typeof(*pos), hole_stack); \ 400 1.6 riastrad &pos->hole_stack != &(mm)->hole_stack ? \ 401 1.6 riastrad hole_start = drm_mm_hole_node_start(pos), \ 402 1.6 riastrad hole_end = hole_start + pos->hole_size, \ 403 1.4 riastrad 1 : 0; \ 404 1.6 riastrad pos = list_next_entry(pos, hole_stack)) 405 1.4 riastrad 406 1.1 riastrad /* 407 1.1 riastrad * Basic range manager support (drm_mm.c) 408 1.1 riastrad */ 409 1.4 riastrad int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); 410 1.6 riastrad int drm_mm_insert_node_in_range(struct drm_mm *mm, 411 1.6 riastrad struct drm_mm_node *node, 412 1.6 riastrad u64 size, 413 1.6 riastrad u64 alignment, 414 1.6 riastrad unsigned long color, 415 1.6 riastrad u64 start, 416 1.6 riastrad u64 end, 417 1.6 riastrad enum drm_mm_insert_mode mode); 418 1.1 riastrad 419 1.4 riastrad /** 420 1.6 riastrad * drm_mm_insert_node_generic - search for space and insert @node 421 1.4 riastrad * @mm: drm_mm to allocate from 422 1.4 riastrad * @node: preallocate node to insert 423 1.4 riastrad * @size: size of the allocation 424 1.4 riastrad * @alignment: alignment of the allocation 425 1.6 riastrad * @color: opaque tag value to use for this node 426 1.6 riastrad * @mode: fine-tune the allocation search and placement 427 1.4 riastrad * 428 1.6 riastrad * This is a simplified version of drm_mm_insert_node_in_range() with no 429 1.6 riastrad * range restrictions applied. 430 1.4 riastrad * 431 1.4 riastrad * The preallocated node must be cleared to 0. 432 1.4 riastrad * 433 1.4 riastrad * Returns: 434 1.4 riastrad * 0 on success, -ENOSPC if there's no suitable hole. 435 1.4 riastrad */ 436 1.6 riastrad static inline int 437 1.6 riastrad drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, 438 1.6 riastrad u64 size, u64 alignment, 439 1.6 riastrad unsigned long color, 440 1.6 riastrad enum drm_mm_insert_mode mode) 441 1.6 riastrad { 442 1.6 riastrad return drm_mm_insert_node_in_range(mm, node, 443 1.6 riastrad size, alignment, color, 444 1.6 riastrad 0, U64_MAX, mode); 445 1.6 riastrad } 446 1.6 riastrad 447 1.4 riastrad /** 448 1.6 riastrad * drm_mm_insert_node - search for space and insert @node 449 1.4 riastrad * @mm: drm_mm to allocate from 450 1.4 riastrad * @node: preallocate node to insert 451 1.4 riastrad * @size: size of the allocation 452 1.4 riastrad * 453 1.6 riastrad * This is a simplified version of drm_mm_insert_node_generic() with @color set 454 1.6 riastrad * to 0. 455 1.4 riastrad * 456 1.4 riastrad * The preallocated node must be cleared to 0. 457 1.4 riastrad * 458 1.4 riastrad * Returns: 459 1.4 riastrad * 0 on success, -ENOSPC if there's no suitable hole. 460 1.4 riastrad */ 461 1.6 riastrad static inline int drm_mm_insert_node(struct drm_mm *mm, 462 1.6 riastrad struct drm_mm_node *node, 463 1.6 riastrad u64 size) 464 1.6 riastrad { 465 1.6 riastrad return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0); 466 1.4 riastrad } 467 1.4 riastrad 468 1.4 riastrad void drm_mm_remove_node(struct drm_mm_node *node); 469 1.4 riastrad void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); 470 1.6 riastrad void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); 471 1.4 riastrad void drm_mm_takedown(struct drm_mm *mm); 472 1.4 riastrad 473 1.6 riastrad /** 474 1.6 riastrad * drm_mm_clean - checks whether an allocator is clean 475 1.6 riastrad * @mm: drm_mm allocator to check 476 1.6 riastrad * 477 1.6 riastrad * Returns: 478 1.6 riastrad * True if the allocator is completely free, false if there's still a node 479 1.6 riastrad * allocated in it. 480 1.6 riastrad */ 481 1.6 riastrad static inline bool drm_mm_clean(const struct drm_mm *mm) 482 1.6 riastrad { 483 1.6 riastrad return list_empty(drm_mm_nodes(mm)); 484 1.6 riastrad } 485 1.6 riastrad 486 1.6 riastrad struct drm_mm_node * 487 1.6 riastrad __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last); 488 1.6 riastrad 489 1.6 riastrad /** 490 1.6 riastrad * drm_mm_for_each_node_in_range - iterator to walk over a range of 491 1.6 riastrad * allocated nodes 492 1.6 riastrad * @node__: drm_mm_node structure to assign to in each iteration step 493 1.6 riastrad * @mm__: drm_mm allocator to walk 494 1.6 riastrad * @start__: starting offset, the first node will overlap this 495 1.6 riastrad * @end__: ending offset, the last node will start before this (but may overlap) 496 1.6 riastrad * 497 1.6 riastrad * This iterator walks over all nodes in the range allocator that lie 498 1.6 riastrad * between @start and @end. It is implemented similarly to list_for_each(), 499 1.6 riastrad * but using the internal interval tree to accelerate the search for the 500 1.6 riastrad * starting node, and so not safe against removal of elements. It assumes 501 1.6 riastrad * that @end is within (or is the upper limit of) the drm_mm allocator. 502 1.6 riastrad * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk 503 1.6 riastrad * over the special _unallocated_ &drm_mm.head_node, and may even continue 504 1.6 riastrad * indefinitely. 505 1.6 riastrad */ 506 1.6 riastrad #define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \ 507 1.6 riastrad for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \ 508 1.6 riastrad node__->start < (end__); \ 509 1.6 riastrad node__ = list_next_entry(node__, node_list)) 510 1.6 riastrad 511 1.6 riastrad void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, 512 1.6 riastrad struct drm_mm *mm, 513 1.6 riastrad u64 size, u64 alignment, unsigned long color, 514 1.6 riastrad u64 start, u64 end, 515 1.6 riastrad enum drm_mm_insert_mode mode); 516 1.6 riastrad 517 1.6 riastrad /** 518 1.6 riastrad * drm_mm_scan_init - initialize lru scanning 519 1.6 riastrad * @scan: scan state 520 1.6 riastrad * @mm: drm_mm to scan 521 1.6 riastrad * @size: size of the allocation 522 1.6 riastrad * @alignment: alignment of the allocation 523 1.6 riastrad * @color: opaque tag value to use for the allocation 524 1.6 riastrad * @mode: fine-tune the allocation search and placement 525 1.6 riastrad * 526 1.6 riastrad * This is a simplified version of drm_mm_scan_init_with_range() with no range 527 1.6 riastrad * restrictions applied. 528 1.6 riastrad * 529 1.6 riastrad * This simply sets up the scanning routines with the parameters for the desired 530 1.6 riastrad * hole. 531 1.6 riastrad * 532 1.6 riastrad * Warning: 533 1.6 riastrad * As long as the scan list is non-empty, no other operations than 534 1.6 riastrad * adding/removing nodes to/from the scan list are allowed. 535 1.6 riastrad */ 536 1.6 riastrad static inline void drm_mm_scan_init(struct drm_mm_scan *scan, 537 1.6 riastrad struct drm_mm *mm, 538 1.6 riastrad u64 size, 539 1.6 riastrad u64 alignment, 540 1.6 riastrad unsigned long color, 541 1.6 riastrad enum drm_mm_insert_mode mode) 542 1.6 riastrad { 543 1.6 riastrad drm_mm_scan_init_with_range(scan, mm, 544 1.6 riastrad size, alignment, color, 545 1.6 riastrad 0, U64_MAX, mode); 546 1.6 riastrad } 547 1.6 riastrad 548 1.6 riastrad bool drm_mm_scan_add_block(struct drm_mm_scan *scan, 549 1.6 riastrad struct drm_mm_node *node); 550 1.6 riastrad bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, 551 1.6 riastrad struct drm_mm_node *node); 552 1.6 riastrad struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan); 553 1.6 riastrad 554 1.6 riastrad void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p); 555 1.1 riastrad 556 1.1 riastrad #endif 557