ttm_execbuf_util.c revision 1.1
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

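/*
 * Undo the reservations taken so far: return any buffers that were
 * taken off the LRU lists back to them, clear the reserved flag and
 * wake up waiters. Caller must hold the global LRU lock.
 */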
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}

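/*
 * Take every reserved buffer off the LRU lists so it cannot be picked
 * for eviction while it is being validated. The number of list
 * references dropped is remembered in put_count so they can be
 * released later, outside the LRU lock. Caller must hold the global
 * LRU lock.
 */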
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

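/*
 * Release the LRU list references counted in put_count by
 * ttm_eu_del_from_lru_locked(). Called without the LRU lock held.
 */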
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

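/*
 * Wait for @bo to become unreserved. The buffers reserved so far are
 * taken off the LRU lists and the LRU lock is dropped while sleeping;
 * if the wait is interrupted, the reservations taken so far are backed
 * off. Returns with the LRU lock held again.
 */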
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
					 struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ret = ttm_bo_wait_unreserved(bo, true);
	spin_lock(&glob->lru_lock);
	if (unlikely(ret != 0))
		ttm_eu_backoff_reservation_locked(list);
	return ret;
}

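/*
 * Undo a successful or partial ttm_eu_reserve_buffers() call: every
 * buffer in @list is put back on the LRU lists and unreserved.
 */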
void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access (cpu_writers > 0),
 * the reservations taken so far are backed off and -EBUSY is returned
 * to the caller.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating
 * multiple buffers in different orders.
 */

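/*
 * Typical calling sequence, as a sketch only. The driver-side names
 * (my_validate_buffers(), my_fence, and how val_bufs[i].bo gets set)
 * are hypothetical placeholders for driver-specific code and are not
 * defined in this file:
 *
 *	struct list_head list;
 *	struct ttm_validate_buffer val_bufs[n];
 *	int ret, i;
 *
 *	INIT_LIST_HEAD(&list);
 *	for (i = 0; i < n; ++i) {
 *		val_bufs[i].bo = <buffer referenced by the command stream>;
 *		list_add_tail(&val_bufs[i].head, &list);
 *	}
 *
 *	ret = ttm_eu_reserve_buffers(&list);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	ret = my_validate_buffers(&list);	(driver-specific placement)
 *	if (unlikely(ret != 0)) {
 *		ttm_eu_backoff_reservation(&list);
 *		return ret;
 *	}
 *
 *	(submit the command stream to the hardware, then attach the
 *	 fence and unreserve all buffers:)
 *	ttm_eu_fence_buffer_objects(&list, my_fence);
 */
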
int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

retry:
	spin_lock(&glob->lru_lock);
	val_seq = entry->bo->bdev->val_seq++;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			/*
			 * Reserved by someone else: wait for this buffer
			 * and retry it, keeping the reservations we
			 * already hold.
			 */
			ret = ttm_eu_wait_unreserved_locked(list, bo);
			if (unlikely(ret != 0)) {
				spin_unlock(&glob->lru_lock);
				ttm_eu_list_ref_sub(list);
				return ret;
			}
			goto retry_this_bo;
		case -EAGAIN:
			/*
			 * We hold the higher validation sequence: back off
			 * all reservations, wait for the buffer and restart
			 * the whole list with a new sequence.
			 */
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_unreserved(bo, true);
			if (unlikely(ret != 0))
				return ret;
			goto retry;
		default:
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			return ret;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			/*
			 * CPU writers are pending on this buffer: back off
			 * and return -EBUSY.
			 */
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			return -EBUSY;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

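/*
 * Attach @sync_obj to every buffer in @list as its new sync object
 * (taking one sync object reference per buffer), unreserve the buffers,
 * and drop the references to the sync objects they previously carried.
 */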
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);