/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>

/**
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */

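/*
 * Per-memory-type state: the drm_mm range allocator that hands out page
 * ranges for this memory type, and the spinlock serializing access to it.
 */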
struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};

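/*
 * Allocate a page range for @bo.  drm_mm node memory is preallocated
 * outside the spinlock (drm_mm_pre_get), then a suitable hole is searched
 * for and claimed atomically under the lock.  The loop retries when the
 * atomic claim fails because the preallocated node pool ran dry.  Returns
 * 0 with mem->mm_node == NULL when no hole is large enough, which TTM
 * treats as "no space in this memory type".
 */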
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node = NULL;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;
	do {
		ret = drm_mm_pre_get(mm);
		if (unlikely(ret))
			return ret;

		spin_lock(&rman->lock);
		node = drm_mm_search_free_in_range(mm,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(node == NULL)) {
			spin_unlock(&rman->lock);
			return 0;
		}
		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
						     mem->page_alignment,
						     placement->fpfn,
						     lpfn);
		spin_unlock(&rman->lock);
	} while (node == NULL);

	mem->mm_node = node;
	mem->start = node->start;
	return 0;
}

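/*
 * Hand a previously allocated range back to the drm_mm.  Tolerates a
 * NULL mm_node so callers can free unconditionally.
 */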
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_put_block(mem->mm_node);
		spin_unlock(&rman->lock);
		mem->mm_node = NULL;
	}
}

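/*
 * Set up the private state for one memory type: a drm_mm spanning
 * @p_size pages and the lock protecting it.
 */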
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;
	int ret;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ret = drm_mm_init(&rman->mm, 0, p_size);
	if (ret) {
		kfree(rman);
		return ret;
	}

	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}

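/*
 * Tear the manager down.  Fails with -EBUSY while allocations are still
 * outstanding in the drm_mm.
 */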
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	spin_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);
	return -EBUSY;
}

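/*
 * Dump the drm_mm allocation state, one line per node, tagged with
 * @prefix.
 */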
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	spin_lock(&rman->lock);
	drm_mm_debug_table(&rman->mm, prefix);
	spin_unlock(&rman->lock);
}

const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
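/*
 * Usage sketch: a driver typically points a memory type at this manager
 * from its init_mem_type() callback and then brings the type online with
 * ttm_bo_init_mm().  The callback below is a hypothetical, minimal
 * example: ttm_bo_manager_func, ttm_bo_init_mm() and the TTM types and
 * flags come from TTM itself, while everything named "mydrv" is invented
 * for illustration.
 *
 *	static int mydrv_init_mem_type(struct ttm_bo_device *bdev,
 *				       uint32_t type,
 *				       struct ttm_mem_type_manager *man)
 *	{
 *		switch (type) {
 *		case TTM_PL_VRAM:
 *			man->func = &ttm_bo_manager_func;
 *			man->flags = TTM_MEMTYPE_FLAG_FIXED |
 *				     TTM_MEMTYPE_FLAG_MAPPABLE;
 *			break;
 *		default:
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 *
 * With that in place, ttm_bo_init_mm(bdev, TTM_PL_VRAM, size >> PAGE_SHIFT)
 * reaches ttm_bo_man_init() through man->func->init, and buffer placement
 * and eviction later call get_node()/put_node() on the driver's behalf.
 */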