/*	$NetBSD: interval_tree.h,v 1.6 2018/08/27 07:51:59 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_LINUX_INTERVAL_TREE_H_
#define	_LINUX_INTERVAL_TREE_H_

#include <sys/rbtree.h>

struct rb_root {
	struct rb_tree	rbr_tree;
};

static inline bool
RB_EMPTY_ROOT(struct rb_root *root)
{

	return RB_TREE_MIN(&root->rbr_tree) == NULL;
}

struct interval_tree_node {
	struct rb_node	itn_node;
	unsigned long	start;	/* inclusive */
	unsigned long	last;	/* inclusive */
};

static inline int
interval_tree_compare_nodes(void *cookie, const void *va, const void *vb)
{
	const struct interval_tree_node *na = va;
	const struct interval_tree_node *nb = vb;

	if (na->start < nb->start)
		return -1;
	if (na->start > nb->start)
		return +1;
	if (na->last < nb->last)
		return -1;
	if (na->last > nb->last)
		return +1;
	return 0;
}

static inline int
interval_tree_compare_key(void *cookie, const void *vn, const void *vk)
{
	const struct interval_tree_node *n = vn;
	const unsigned long *k = vk;

	if (n->start < *k)
		return -1;
	if (*k < n->start)
		return +1;
	return 0;
}

static const rb_tree_ops_t interval_tree_ops = {
	.rbto_compare_nodes = interval_tree_compare_nodes,
	.rbto_compare_key = interval_tree_compare_key,
	.rbto_node_offset = offsetof(struct interval_tree_node, itn_node),
};

static inline void
interval_tree_init(struct rb_root *root)
{

	rb_tree_init(&root->rbr_tree, &interval_tree_ops);
}

static inline void
interval_tree_insert(struct interval_tree_node *node, struct rb_root *root)
{
	struct interval_tree_node *collision __diagused;

	collision = rb_tree_insert_node(&root->rbr_tree, node);
	KASSERT(collision == node);
}

static inline void
interval_tree_remove(struct interval_tree_node *node, struct rb_root *root)
{

	rb_tree_remove_node(&root->rbr_tree, node);
}

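/*
 * Example (illustrative sketch only, not part of the header's API
 * surface): a caller keeps a struct interval_tree_node per inclusive
 * range and hangs it off a root of its own.  The names and bounds
 * below are hypothetical.
 *
 *	struct rb_root root;
 *	struct interval_tree_node span = { .start = 0, .last = 0xfff };
 *
 *	interval_tree_init(&root);
 *	interval_tree_insert(&span, &root);
 *	...
 *	interval_tree_remove(&span, &root);
 */
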
static inline struct interval_tree_node *
interval_tree_iter_first(struct rb_root *root, unsigned long start,
    unsigned long last)
{
	struct interval_tree_node *node;

	node = rb_tree_find_node_geq(&root->rbr_tree, &start);
	if (node == NULL)
		return NULL;
	KASSERT(start <= node->start);
	if (last < node->start)
		return NULL;

	return node;
}

/*
 * XXX Linux's interval_tree_iter_next doesn't take the root as an
 * argument, which makes this difficult.  So we'll just patch those
 * uses.
 */
static inline struct interval_tree_node *
interval_tree_iter_next(struct rb_root *root, struct interval_tree_node *node,
    unsigned long start, unsigned long last)
{
	struct interval_tree_node *next;

	KASSERT(node != NULL);
	next = rb_tree_iterate(&root->rbr_tree, node, RB_DIR_RIGHT);
	if (next == NULL)
		return NULL;
	KASSERT(start <= next->start);
	if (last < next->start)
		return NULL;

	return next;
}
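
/*
 * Example (illustrative sketch only): walking every node whose start
 * lies in [a, b], passing the extra root argument noted above.  The
 * loop variables, bounds, and visit() are hypothetical.
 *
 *	struct interval_tree_node *n;
 *
 *	for (n = interval_tree_iter_first(&root, a, b);
 *	     n != NULL;
 *	     n = interval_tree_iter_next(&root, n, a, b))
 *		visit(n);
 */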

/*
 * XXX This is not actually postorder, but I can't fathom why you would
 * want postorder for an ordered tree; different insertion orders lead
 * to different traversal orders.
 */
#define	rbtree_postorder_for_each_entry_safe(NODE, TMP, ROOT, FIELD)	      \
	for ((NODE) = RB_TREE_MIN(&(ROOT)->rbr_tree);			      \
		((NODE) != NULL &&					      \
		    ((TMP) = rb_tree_iterate(&(ROOT)->rbr_tree, (NODE),	      \
			RB_DIR_RIGHT), 1));				      \
		(NODE) = (TMP))
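
/*
 * Example (illustrative sketch only): one way to tear down a tree.
 * TMP is advanced before the body runs, so the body may remove and
 * release NODE; FIELD is accepted for Linux compatibility but unused
 * by this shim.  The free routine should match however the caller
 * allocated its nodes (kmem_free is hypothetical here).
 *
 *	struct interval_tree_node *n, *tmp;
 *
 *	rbtree_postorder_for_each_entry_safe(n, tmp, &root, itn_node) {
 *		interval_tree_remove(n, &root);
 *		kmem_free(n, sizeof(*n));
 *	}
 */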

#endif	/* _LINUX_INTERVAL_TREE_H_ */