/*	$NetBSD: drm_mm.c,v 1.20 2022/09/01 11:48:59 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_mm.c,v 1.20 2022/09/01 11:48:59 riastradh Exp $");

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it's in the DRM core, which means it's easier to extend for
 * some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm; allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff this is not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
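 *
 * A minimal usage sketch (illustrative only; the embedding structure, the
 * sizes and the locking shown here are hypothetical, not part of this file):
 *
 *	struct my_region { struct drm_mm_node node; };	// node embedded by driver
 *
 *	struct drm_mm mm;
 *	struct my_region *r;
 *	int ret;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);	// manage [0, 1 MiB)
 *	r = kzalloc(sizeof(*r), GFP_KERNEL);	// node must start zeroed
 *	ret = drm_mm_insert_node_in_range(&mm, &r->node, 4096, 0, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_BEST);
 *	if (ret == 0) {
 *		// use r->node.start / r->node.size, then release:
 *		drm_mm_remove_node(&r->node);
 *	}
 *	drm_mm_takedown(&mm);	// all nodes must be removed first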
 */

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08"PRIx64" + %08"PRIx64"]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08"PRIx64" + %08"PRIx64"]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

#ifndef __NetBSD__
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)
#endif

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm_const, u64 start, u64 last)
{
	struct drm_mm *mm = __UNCONST(mm_const);
#ifdef __NetBSD__
	struct drm_mm_node *node;
	list_for_each_entry(node, &mm->head_node.node_list, node_list) {
		if (start <= LAST(node) && START(node) <= last)
			return node;
	}
	return &mm->head_node;
#else
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
#endif
}
EXPORT_SYMBOL(__drm_mm_interval_first);

#ifndef __NetBSD__
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}
#endif

#ifdef __NetBSD__

static int
compare_hole_addrs(void *cookie, const void *va, const void *vb)
{
	const struct drm_mm_node *a = va, *b = vb;
	const u64 aa = __drm_mm_hole_node_start(a);
	const u64 ba = __drm_mm_hole_node_start(b);

	KASSERTMSG((aa == ba ||
		aa + a->hole_size <= ba ||
		aa >= ba + b->hole_size),
	    "overlapping holes: [0x%"PRIx64", 0x%"PRIx64"),"
	    " [0x%"PRIx64", 0x%"PRIx64")",
	    aa, aa + a->hole_size,
	    ba, ba + b->hole_size);
	if (aa < ba)
		return -1;
	if (aa > ba)
		return +1;
	return 0;
}

static int
compare_hole_addr_key(void *cookie, const void *vn, const void *vk)
{
	const struct drm_mm_node *n = vn;
	const u64 a = __drm_mm_hole_node_start(n);
	const u64 *k = vk;

	if (a < *k)
		return -1;
	if (a + n->hole_size >= *k)	/* allows range lookups */
		return +1;
	return 0;
}

static const rb_tree_ops_t holes_addr_rb_ops = {
	.rbto_compare_nodes = compare_hole_addrs,
	.rbto_compare_key = compare_hole_addr_key,
	.rbto_node_offset = offsetof(struct drm_mm_node, rb_hole_addr),
};

#else

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#endif

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static int
compare_hole_sizes(void *cookie, const void *va, const void *vb)
{
	const struct drm_mm_node *a = va, *b = vb;

	if (a->hole_size > b->hole_size)
		return -1;
	if (a->hole_size < b->hole_size)
		return +1;
	return (a < b ? -1 : a > b ? +1 : 0);
}

static int
compare_hole_size_key(void *cookie, const void *vn, const void *vk)
{
	const struct drm_mm_node *n = vn;
	const u64 *k = vk;

	if (n->hole_size > *k)
		return -1;
	if (n->hole_size < *k)
		return +1;
	return 0;
}

static const rb_tree_ops_t holes_size_rb_ops = {
	.rbto_compare_nodes = compare_hole_sizes,
	.rbto_compare_key = compare_hole_size_key,
	.rbto_node_offset = offsetof(struct drm_mm_node, rb_hole_size),
};

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
#ifdef __NetBSD__
	struct drm_mm_node *collision __diagused;
	collision = rb_tree_insert_node(&root->rb_root.rbr_tree, node);
	KASSERT(collision == node);
#else
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
#endif
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
#ifdef __NetBSD__
	struct drm_mm_node *collision __diagused;
	collision = rb_tree_insert_node(&mm->holes_addr.rbr_tree, node);
	KASSERT(collision == node);
#else
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
#endif

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
#ifdef __NetBSD__
	struct drm_mm_node *best;

	best = rb_tree_find_node_leq(&mm->holes_size.rb_root.rbr_tree, &size);
	KASSERT(best == NULL || size <= best->hole_size);

	return best;
#else
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
#endif
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
#ifdef __NetBSD__
	struct rb_node *rb = mm->holes_addr.rbr_tree.rbt_root;
#else
	struct rb_node *rb = mm->holes_addr.rb_node;
#endif
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
#ifdef __NetBSD__
		return RB_TREE_PREV(&mm->holes_size.rb_root.rbr_tree, node);
#else
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
#endif

	case DRM_MM_INSERT_LOW:
#ifdef __NetBSD__
		return RB_TREE_NEXT(&mm->holes_addr.rbr_tree, node);
#else
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
#endif

	case DRM_MM_INSERT_HIGH:
#ifdef __NetBSD__
		return RB_TREE_PREV(&mm->holes_addr.rbr_tree, node);
#else
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
#endif

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
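 *
 * A hedged sketch of the firmware-framebuffer case (fb_base, fb_size and the
 * surrounding driver code are hypothetical, not defined in this file):
 *
 *	struct drm_mm_node fb_node = {};	// all other fields must be 0
 *
 *	fb_node.start = fb_base;	// placement dictated by the firmware
 *	fb_node.size  = fb_size;
 *	fb_node.color = 0;
 *	if (drm_mm_reserve_node(&mm, &fb_node))
 *		;	// range already in use: fall back or fail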
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
#ifndef __NetBSD__
	drm_mm_interval_tree_add_node(hole, node);
#endif
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
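 *
 * A hedged example of a range-restricted, top-down allocation (the numbers
 * are illustrative only and are not taken from any real driver):
 *
 *	struct drm_mm_node node = {};	// must be zeroed before insertion
 *	int err;
 *
 *	// 64 KiB, 4 KiB aligned, placed from the top of [16 MiB, 32 MiB)
 *	err = drm_mm_insert_node_in_range(&mm, &node,
 *					  0x10000, 0x1000, 0,
 *					  0x1000000, 0x2000000,
 *					  DRM_MM_INSERT_HIGH);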
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
#ifndef __NetBSD__
		drm_mm_interval_tree_add_node(hole, node);
#endif

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

#ifdef __NetBSD__
	__USE(mm);
#else
	drm_mm_interval_tree_remove(node, &mm->interval_tree);
#endif
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
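 *
 * A minimal sketch (old_obj and new_obj are hypothetical driver objects that
 * each embed a &drm_mm_node):
 *
 *	drm_mm_replace_node(&old_obj->node, &new_obj->node);
 *	// new_obj->node now owns the allocation at the same start address;
 *	// old_obj->node is no longer allocated.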
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
#ifndef __NetBSD__
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
#endif

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly.
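 *
 * A rough sketch of the usual eviction loop (my_obj, its lru list, my_evict()
 * and the locking are hypothetical driver-side constructs, shown only to make
 * the ordering rules above concrete):
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(evict);	// blocks added to the scan, newest first
 *
 *	drm_mm_scan_init(&scan, &mm, size, align, color, DRM_MM_INSERT_BEST);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;		// a suitable hole was found
 *	}
 *
 *	// Restore allocator state: every added block must be removed again,
 *	// newest to oldest. Keep only the blocks the scan selected.
 *	list_for_each_entry_safe(obj, next, &evict, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);	// not needed, keep it
 *	}
 *
 *	// Only now, with the scan list empty, evict the selected blocks.
 *	list_for_each_entry_safe(obj, next, &evict, evict_link)
 *		my_evict(obj);	// unbinds and calls drm_mm_remove_node()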
 */

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g.
 * using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() call with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
#ifdef __NetBSD__
	/* XXX interval tree */
	rb_tree_init(&mm->holes_size.rb_root.rbr_tree, &holes_size_rb_ops);
	rb_tree_init(&mm->holes_addr.rbr_tree, &holes_addr_rb_ops);
#else
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;
#endif

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018"PRIx64"-%#018"PRIx64": %"PRIu64": free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018"PRIx64"-%#018"PRIx64": %"PRIu64": used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %"PRIu64", used %"PRIu64" free %"PRIu64"\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);