/*	$NetBSD: drm_mm.c,v 1.19 2022/09/01 01:54:28 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
     46   1.1  riastrad 
     47   1.4  riastrad #include <sys/cdefs.h>
     48  1.19  riastrad __KERNEL_RCSID(0, "$NetBSD: drm_mm.c,v 1.19 2022/09/01 01:54:28 riastradh Exp $");
     49   1.7  riastrad 
     50   1.7  riastrad #include <linux/export.h>
     51   1.7  riastrad #include <linux/interval_tree_generic.h>
     52   1.7  riastrad #include <linux/seq_file.h>
     53   1.7  riastrad #include <linux/slab.h>
     54   1.7  riastrad #include <linux/stacktrace.h>
     55   1.4  riastrad 
     56   1.1  riastrad #include <drm/drm_mm.h>
     57   1.1  riastrad 
     58   1.3  riastrad /**
     59   1.3  riastrad  * DOC: Overview
     60   1.3  riastrad  *
     61   1.3  riastrad  * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them; the upside of drm_mm
 * is that it's in the DRM core, which means that it's easier to extend for
 * some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway, this is not a real concern. Removing a node
 * again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as are
 * some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */
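
/*
 * Example: a minimal, hypothetical usage sketch of the allocator; the
 * range, sizes, and error handling here are illustrative only, not taken
 * from any driver:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	err = drm_mm_insert_node(&mm, &node, 4096);
 *	if (err == 0) {
 *		... use [node.start, node.start + node.size) ...
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);
 */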

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08"PRIx64" + %08"PRIx64"]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08"PRIx64" + %08"PRIx64"]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

#ifndef __NetBSD__
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)
#endif

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm_const, u64 start, u64 last)
{
	struct drm_mm *mm = __UNCONST(mm_const);
#ifdef __NetBSD__
	struct drm_mm_node *node;
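	/* No interval tree in the NetBSD port yet (see the XXX in
	 * drm_mm_init): walk the node list linearly, O(n), for the
	 * first node overlapping [start, last]. */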
	list_for_each_entry(node, &mm->head_node.node_list, node_list) {
		if (start <= LAST(node) && START(node) <= last)
			return node;
	}
	return &mm->head_node;
#else
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
#endif
}
EXPORT_SYMBOL(__drm_mm_interval_first);

#ifndef __NetBSD__
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}
#endif

#ifdef __NetBSD__

static int
compare_hole_addrs(void *cookie, const void *va, const void *vb)
{
	const struct drm_mm_node *a = va, *b = vb;
	const u64 aa = __drm_mm_hole_node_start(a);
	const u64 ba = __drm_mm_hole_node_start(b);

	KASSERTMSG((aa == ba ||
		aa + a->hole_size <= ba ||
		aa >= ba + b->hole_size),
	    "overlapping holes: [0x%"PRIx64", 0x%"PRIx64"),"
	    " [0x%"PRIx64", 0x%"PRIx64")",
	    aa, aa + a->hole_size,
	    ba, ba + b->hole_size);
	if (aa < ba)
		return -1;
	if (aa > ba)
		return +1;
	return 0;
}

static int
compare_hole_addr_key(void *cookie, const void *vn, const void *vk)
{
	const struct drm_mm_node *n = vn;
	const u64 a = __drm_mm_hole_node_start(n);
	const u64 *k = vk;

	if (a + n->hole_size <= *k)	/* hole entirely below the key */
		return -1;
	if (a > *k)	/* hole entirely above the key; allows range lookups */
		return +1;
	return 0;	/* key falls within [start, start + hole_size) */
}

static const rb_tree_ops_t holes_addr_rb_ops = {
	.rbto_compare_nodes = compare_hole_addrs,
	.rbto_compare_key = compare_hole_addr_key,
	.rbto_node_offset = offsetof(struct drm_mm_node, rb_hole_addr),
};

#else

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#endif

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static int
compare_hole_sizes(void *cookie, const void *va, const void *vb)
{
	const struct drm_mm_node *a = va, *b = vb;

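	/*
	 * Sort by decreasing hole size, ties broken by node pointer, so
	 * the leftmost node is the largest hole and best_hole() can use
	 * rb_tree_find_node_leq() to get the smallest hole that fits.
	 */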
	if (a->hole_size > b->hole_size)
		return -1;
	if (a->hole_size < b->hole_size)
		return +1;
	return (a < b ? -1 : a > b ? +1 : 0);
}

static int
compare_hole_size_key(void *cookie, const void *vn, const void *vk)
{
	const struct drm_mm_node *n = vn;
	const u64 *k = vk;

	if (n->hole_size > *k)
		return -1;
	if (n->hole_size < *k)
		return +1;
	return 0;
}

static const rb_tree_ops_t holes_size_rb_ops = {
	.rbto_compare_nodes = compare_hole_sizes,
	.rbto_compare_key = compare_hole_size_key,
	.rbto_node_offset = offsetof(struct drm_mm_node, rb_hole_size),
};

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
#ifdef __NetBSD__
	struct drm_mm_node *collision __diagused;
	collision = rb_tree_insert_node(&root->rb_root.rbr_tree, node);
	KASSERT(collision == node);
#else
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
#endif
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
#ifdef __NetBSD__
	struct drm_mm_node *collision __diagused;
	collision = rb_tree_insert_node(&mm->holes_addr.rbr_tree, node);
	KASSERT(collision == node);
#else
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
#endif

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
#ifdef __NetBSD__
	struct drm_mm_node *best;

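	/*
	 * The size tree is ordered largest-first, so the last node <=
	 * the key in tree order is the smallest hole still >= size.
	 */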
	best = rb_tree_find_node_leq(&mm->holes_size.rb_root.rbr_tree, &size);
	KASSERT(best == NULL || size <= best->hole_size);

	return best;
#else
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
#endif
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
#ifdef __NetBSD__
	struct drm_mm_node *node;

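	/*
	 * The key comparator matches any hole whose range
	 * [start, start + hole_size) contains addr, so this returns
	 * the hole containing addr, if one exists.
	 */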
	node = rb_tree_find_node(&mm->holes_addr.rbr_tree, &addr);
	KASSERT(node == NULL || __drm_mm_hole_node_start(node) <= addr);
	KASSERT(node == NULL || addr <
	    __drm_mm_hole_node_start(node) + node->hole_size);

	return node;
#else
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
#endif
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
#ifdef __NetBSD__
		return rb_tree_find_node_geq(&mm->holes_addr.rbr_tree, &start);
#else
		return find_hole(mm, start);
#endif

	case DRM_MM_INSERT_HIGH:
#ifdef __NetBSD__
		return rb_tree_find_node_leq(&mm->holes_addr.rbr_tree, &end);
#else
		return find_hole(mm, end);
#endif

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
#ifdef __NetBSD__
		return RB_TREE_PREV(&mm->holes_size.rb_root.rbr_tree, node);
#else
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
#endif

	case DRM_MM_INSERT_LOW:
#ifdef __NetBSD__
		return RB_TREE_NEXT(&mm->holes_addr.rbr_tree, node);
#else
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
#endif

	case DRM_MM_INSERT_HIGH:
#ifdef __NetBSD__
		return RB_TREE_PREV(&mm->holes_addr.rbr_tree, node);
#else
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
#endif

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
#ifndef __NetBSD__
	drm_mm_interval_tree_add_node(hole, node);
#endif
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
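
/*
 * Example: a hypothetical sketch of taking over a firmware-initialized
 * framebuffer by reserving its exact range; fb_base and fb_size stand in
 * for values reported by the firmware:
 *
 *	struct drm_mm_node fb_node = {};
 *	int err;
 *
 *	fb_node.start = fb_base;
 *	fb_node.size = fb_size;
 *	err = drm_mm_reserve_node(&mm, &fb_node);
 *	if (err)
 *		... -ENOSPC: the range is not entirely in a free hole ...
 */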
    593   1.1  riastrad 
    594   1.7  riastrad static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
    595   1.7  riastrad {
    596   1.7  riastrad 	return rb ? rb_to_hole_size(rb) : 0;
    597   1.7  riastrad }
    598   1.7  riastrad 
    599   1.1  riastrad /**
    600   1.7  riastrad  * drm_mm_insert_node_in_range - ranged search for space and insert @node
    601   1.3  riastrad  * @mm: drm_mm to allocate from
    602   1.3  riastrad  * @node: preallocate node to insert
    603   1.3  riastrad  * @size: size of the allocation
    604   1.3  riastrad  * @alignment: alignment of the allocation
    605   1.3  riastrad  * @color: opaque tag value to use for this node
    606   1.7  riastrad  * @range_start: start of the allowed range for this node
    607   1.7  riastrad  * @range_end: end of the allowed range for this node
    608   1.7  riastrad  * @mode: fine-tune the allocation search and placement
    609   1.3  riastrad  *
    610   1.7  riastrad  * The preallocated @node must be cleared to 0.
    611   1.3  riastrad  *
    612   1.3  riastrad  * Returns:
    613   1.3  riastrad  * 0 on success, -ENOSPC if there's no suitable hole.
    614   1.1  riastrad  */
    615   1.7  riastrad int drm_mm_insert_node_in_range(struct drm_mm * const mm,
    616   1.7  riastrad 				struct drm_mm_node * const node,
    617   1.7  riastrad 				u64 size, u64 alignment,
    618   1.7  riastrad 				unsigned long color,
    619   1.7  riastrad 				u64 range_start, u64 range_end,
    620   1.7  riastrad 				enum drm_mm_insert_mode mode)
    621   1.7  riastrad {
    622   1.7  riastrad 	struct drm_mm_node *hole;
    623   1.7  riastrad 	u64 remainder_mask;
    624   1.7  riastrad 	bool once;
    625   1.7  riastrad 
    626   1.7  riastrad 	DRM_MM_BUG_ON(range_start > range_end);
    627   1.7  riastrad 
    628   1.7  riastrad 	if (unlikely(size == 0 || range_end - range_start < size))
    629   1.7  riastrad 		return -ENOSPC;
    630   1.7  riastrad 
    631   1.7  riastrad 	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
    632   1.1  riastrad 		return -ENOSPC;
    633   1.1  riastrad 
    634   1.7  riastrad 	if (alignment <= 1)
    635   1.7  riastrad 		alignment = 0;
    636   1.1  riastrad 
    637   1.7  riastrad 	once = mode & DRM_MM_INSERT_ONCE;
    638   1.7  riastrad 	mode &= ~DRM_MM_INSERT_ONCE;
    639   1.1  riastrad 
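	/*
	 * Power-of-two alignments can be checked with a cheap mask;
	 * anything else falls back to 64-bit division below.
	 */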
	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
#ifndef __NetBSD__
		drm_mm_interval_tree_add_node(hole, node);
#endif

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
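
/*
 * Example: a hypothetical top-down allocation; the manager 'mm' and the
 * numbers are illustrative only. Requesting 64 KiB, 4 KiB aligned, color
 * 0, restricted to [0, 1 << 32) and searching from the top downwards:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = drm_mm_insert_node_in_range(&mm, &node,
 *					  64 * 1024, 4096, 0,
 *					  0, 1ULL << 32,
 *					  DRM_MM_INSERT_HIGH);
 */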

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

#ifdef __NetBSD__
	__USE(mm);
#else
	drm_mm_interval_tree_remove(node, &mm->interval_tree);
#endif
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
#ifndef __NetBSD__
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
#endif

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not most
 * efficient when we simply start to select all objects from the tail of an LRU
 * until there's a suitable hole: Especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we evict
 * lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly.
 */
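
/*
 * Example: a condensed, hypothetical sketch of the roster protocol above.
 * The driver's LRU list, object type, and evict helper are assumptions
 * for illustration. Note list_add() prepends, so walking evict_list
 * front-to-back visits the nodes in reverse insertion order as required:
 *
 *	struct drm_mm_scan scan;
 *	struct driver_obj *obj, *tmp;
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_scan_init(&scan, &mm, size, alignment, color, mode);
 *	list_for_each_entry(obj, &lru_list, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;
 *	}
 *
 *	list_for_each_entry_safe(obj, tmp, &evict_list, evict_link) {
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			driver_evict_obj(obj);
 *	}
 */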

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
    907   1.7  riastrad 	 * later in drm_mm_scan_remove_block().
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
    972   1.7  riastrad  * immediately following drm_mm_insert_node_in_range_generic() or one of the
    973   1.7  riastrad  * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
    974   1.7  riastrad  * the just freed block (because it's at the top of the free_stack list).
    975   1.1  riastrad  *
    976   1.3  riastrad  * Returns:
    977   1.3  riastrad  * True if this block should be evicted, false otherwise. Will always
    978   1.3  riastrad  * return false when no hole has been found.
    979   1.1  riastrad  */
    980   1.7  riastrad bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
    981   1.7  riastrad 			      struct drm_mm_node *node)
    982   1.1  riastrad {
    983   1.1  riastrad 	struct drm_mm_node *prev_node;
    984   1.1  riastrad 
    985   1.7  riastrad 	DRM_MM_BUG_ON(node->mm != scan->mm);
    986   1.7  riastrad 	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
    987   1.7  riastrad 	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
    988   1.7  riastrad 
    989   1.7  riastrad 	DRM_MM_BUG_ON(!node->mm->scan_active);
    990   1.7  riastrad 	node->mm->scan_active--;
    991   1.7  riastrad 
    992   1.7  riastrad 	/* During drm_mm_scan_add_block() we decoupled this node leaving
    993   1.7  riastrad 	 * its pointers intact. Now that the caller is walking back along
    994   1.7  riastrad 	 * the eviction list we can restore this block into its rightful
    995   1.7  riastrad 	 * place on the full node_list. To confirm that the caller is walking
    996   1.7  riastrad 	 * backwards correctly we check that prev_node->next == node->next,
    997   1.7  riastrad 	 * i.e. both believe the same node should be on the other side of the
    998   1.7  riastrad 	 * hole.
    999   1.7  riastrad 	 */
   1000   1.7  riastrad 	prev_node = list_prev_entry(node, node_list);
   1001   1.7  riastrad 	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
   1002   1.7  riastrad 		      list_next_entry(node, node_list));
   1003   1.1  riastrad 	list_add(&node->node_list, &prev_node->node_list);
   1004   1.1  riastrad 
   1005   1.7  riastrad 	return (node->start + node->size > scan->hit_start &&
   1006   1.7  riastrad 		node->start < scan->hit_end);
   1007   1.1  riastrad }
   1008   1.1  riastrad EXPORT_SYMBOL(drm_mm_scan_remove_block);
   1009   1.1  riastrad 
   1010   1.3  riastrad /**
   1011   1.7  riastrad  * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
   1012   1.7  riastrad  * @scan: drm_mm scan with target hole
   1013   1.7  riastrad  *
   1014   1.7  riastrad  * After completing an eviction scan and removing the selected nodes, we may
   1015   1.7  riastrad  * need to remove a few more nodes from either side of the target hole if
   1016   1.7  riastrad  * mm.color_adjust is being used.
   1017   1.3  riastrad  *
   1018   1.3  riastrad  * Returns:
   1019   1.7  riastrad  * A node to evict, or NULL if there are no overlapping nodes.
   1020   1.3  riastrad  */
   1021   1.7  riastrad struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
   1022   1.1  riastrad {
   1023   1.7  riastrad 	struct drm_mm *mm = scan->mm;
   1024   1.7  riastrad 	struct drm_mm_node *hole;
   1025   1.7  riastrad 	u64 hole_start, hole_end;
   1026   1.7  riastrad 
   1027   1.7  riastrad 	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
   1028   1.7  riastrad 
   1029   1.7  riastrad 	if (!mm->color_adjust)
   1030   1.7  riastrad 		return NULL;
   1031   1.7  riastrad 
   1032   1.7  riastrad 	/*
   1033   1.7  riastrad 	 * The hole found during scanning should ideally be the first element
   1034   1.7  riastrad 	 * in the hole_stack list, but due to side-effects in the driver it
   1035   1.7  riastrad 	 * may not be.
   1036   1.7  riastrad 	 */
   1037   1.7  riastrad 	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
   1038   1.7  riastrad 		hole_start = __drm_mm_hole_node_start(hole);
   1039   1.7  riastrad 		hole_end = hole_start + hole->hole_size;
   1040   1.7  riastrad 
   1041   1.7  riastrad 		if (hole_start <= scan->hit_start &&
   1042   1.7  riastrad 		    hole_end >= scan->hit_end)
   1043   1.7  riastrad 			break;
   1044   1.7  riastrad 	}
   1045   1.7  riastrad 
   1046   1.7  riastrad 	/* We should only be called after we found the hole previously */
   1047   1.7  riastrad 	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
   1048   1.7  riastrad 	if (unlikely(&hole->hole_stack == &mm->hole_stack))
   1049   1.7  riastrad 		return NULL;
   1050   1.7  riastrad 
   1051   1.7  riastrad 	DRM_MM_BUG_ON(hole_start > scan->hit_start);
   1052   1.7  riastrad 	DRM_MM_BUG_ON(hole_end < scan->hit_end);
   1053   1.7  riastrad 
   1054   1.7  riastrad 	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
   1055   1.7  riastrad 	if (hole_start > scan->hit_start)
   1056   1.7  riastrad 		return hole;
   1057   1.7  riastrad 	if (hole_end < scan->hit_end)
   1058   1.7  riastrad 		return list_next_entry(hole, node_list);
   1059   1.1  riastrad 
   1060   1.7  riastrad 	return NULL;
   1061   1.1  riastrad }
   1062   1.7  riastrad EXPORT_SYMBOL(drm_mm_scan_color_evict);
   1063   1.1  riastrad 
   1064   1.3  riastrad /**
   1065   1.3  riastrad  * drm_mm_init - initialize a drm-mm allocator
   1066   1.3  riastrad  * @mm: the drm_mm structure to initialize
   1067   1.3  riastrad  * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
#ifdef __NetBSD__
	/* XXX interval tree */
	rb_tree_init(&mm->holes_size.rb_root.rbr_tree, &holes_size_rb_ops);
	rb_tree_init(&mm->holes_addr.rbr_tree, &holes_addr_rb_ops);
#else
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;
#endif

	/* Clever trick to avoid a special case in the free hole tracking. */
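	/*
	 * head_node nominally occupies [start + size, start): with 2^64
	 * wrap-around its end is exactly `start`, so the hole following
	 * it covers the whole managed range [start, start + size) and
	 * the empty allocator needs no special case.
	 */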
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018"PRIx64"-%#018"PRIx64": %"PRIu64": free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018"PRIx64"-%#018"PRIx64": %"PRIu64": used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %"PRIu64", used %"PRIu64" free %"PRIu64"\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);