nouveau_fence.c revision 7ec681f3
/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_screen.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "util/os_time.h"

#ifdef PIPE_OS_UNIX
#include <sched.h>
#endif

bool
nouveau_fence_new(struct nouveau_screen *screen, struct nouveau_fence **fence)
{
   *fence = CALLOC_STRUCT(nouveau_fence);
   if (!*fence)
      return false;

   (*fence)->screen = screen;
   (*fence)->ref = 1;
   list_inithead(&(*fence)->work);

   return true;
}

static void
nouveau_fence_trigger_work(struct nouveau_fence *fence)
{
   struct nouveau_fence_work *work, *tmp;

   LIST_FOR_EACH_ENTRY_SAFE(work, tmp, &fence->work, list) {
      work->func(work->data);
      list_del(&work->list);
      FREE(work);
   }
}

void
nouveau_fence_emit(struct nouveau_fence *fence)
{
   struct nouveau_screen *screen = fence->screen;

   assert(fence->state == NOUVEAU_FENCE_STATE_AVAILABLE);

   /* set this now, so that if fence.emit triggers a flush we don't recurse */
   fence->state = NOUVEAU_FENCE_STATE_EMITTING;

   ++fence->ref;

   if (screen->fence.tail)
      screen->fence.tail->next = fence;
   else
      screen->fence.head = fence;

   screen->fence.tail = fence;

   screen->fence.emit(&screen->base, &fence->sequence);

   assert(fence->state == NOUVEAU_FENCE_STATE_EMITTING);
   fence->state = NOUVEAU_FENCE_STATE_EMITTED;
}

void
nouveau_fence_del(struct nouveau_fence *fence)
{
   struct nouveau_fence *it;
   struct nouveau_screen *screen = fence->screen;

   if (fence->state == NOUVEAU_FENCE_STATE_EMITTED ||
       fence->state == NOUVEAU_FENCE_STATE_FLUSHED) {
      if (fence == screen->fence.head) {
         screen->fence.head = fence->next;
         if (!screen->fence.head)
            screen->fence.tail = NULL;
      } else {
         for (it = screen->fence.head; it && it->next != fence; it = it->next);
         it->next = fence->next;
         if (screen->fence.tail == fence)
            screen->fence.tail = it;
      }
   }

   if (!list_is_empty(&fence->work)) {
      debug_printf("WARNING: deleting fence with work still pending !\n");
      nouveau_fence_trigger_work(fence);
   }

   FREE(fence);
}

void
nouveau_fence_cleanup(struct nouveau_screen *screen)
{
   if (screen->fence.current) {
      struct nouveau_fence *current = NULL;

      /* nouveau_fence_wait will create a new current fence, so wait on the
       * _current_ one, and remove both.
       */
      nouveau_fence_ref(screen->fence.current, &current);
      nouveau_fence_wait(current, NULL);
      nouveau_fence_ref(NULL, &current);
      nouveau_fence_ref(NULL, &screen->fence.current);
   }
}

void
nouveau_fence_update(struct nouveau_screen *screen, bool flushed)
{
   struct nouveau_fence *fence;
   struct nouveau_fence *next = NULL;
   u32 sequence = screen->fence.update(&screen->base);

   if (screen->fence.sequence_ack == sequence)
      return;
   screen->fence.sequence_ack = sequence;

   for (fence = screen->fence.head; fence; fence = next) {
      next = fence->next;
      sequence = fence->sequence;

      fence->state = NOUVEAU_FENCE_STATE_SIGNALLED;

      nouveau_fence_trigger_work(fence);
      nouveau_fence_ref(NULL, &fence);

      if (sequence == screen->fence.sequence_ack)
         break;
   }
   screen->fence.head = next;
   if (!next)
      screen->fence.tail = NULL;

   if (flushed) {
      for (fence = next; fence; fence = fence->next)
         if (fence->state == NOUVEAU_FENCE_STATE_EMITTED)
            fence->state = NOUVEAU_FENCE_STATE_FLUSHED;
   }
}

#define NOUVEAU_FENCE_MAX_SPINS (1 << 31)

bool
nouveau_fence_signalled(struct nouveau_fence *fence)
{
   struct nouveau_screen *screen = fence->screen;

   if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
      return true;

   if (fence->state >= NOUVEAU_FENCE_STATE_EMITTED)
      nouveau_fence_update(screen, false);

   return fence->state == NOUVEAU_FENCE_STATE_SIGNALLED;
}

static bool
nouveau_fence_kick(struct nouveau_fence *fence)
{
   struct nouveau_screen *screen = fence->screen;

   /* wtf, someone is waiting on a fence in flush_notify handler? */
   assert(fence->state != NOUVEAU_FENCE_STATE_EMITTING);

   if (fence->state < NOUVEAU_FENCE_STATE_EMITTED) {
      PUSH_SPACE(screen->pushbuf, 8);
      /* The space allocation might trigger a flush, which could emit the
       * current fence. So check again.
       */
      if (fence->state < NOUVEAU_FENCE_STATE_EMITTED)
         nouveau_fence_emit(fence);
   }

   if (fence->state < NOUVEAU_FENCE_STATE_FLUSHED)
      if (nouveau_pushbuf_kick(screen->pushbuf, screen->pushbuf->channel))
         return false;

   if (fence == screen->fence.current)
      nouveau_fence_next(screen);

   nouveau_fence_update(screen, false);

   return true;
}

bool
nouveau_fence_wait(struct nouveau_fence *fence, struct pipe_debug_callback *debug)
{
   struct nouveau_screen *screen = fence->screen;
   uint32_t spins = 0;
   int64_t start = 0;

   if (debug && debug->debug_message)
      start = os_time_get_nano();

   if (!nouveau_fence_kick(fence))
      return false;

   do {
      if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
         if (debug && debug->debug_message)
            pipe_debug_message(debug, PERF_INFO,
                               "stalled %.3f ms waiting for fence",
                               (os_time_get_nano() - start) / 1000000.f);
         return true;
      }
      if (!spins)
         NOUVEAU_DRV_STAT(screen, any_non_kernel_fence_sync_count, 1);
      spins++;
#ifdef PIPE_OS_UNIX
      if (!(spins % 8)) /* donate a few cycles */
         sched_yield();
#endif

      nouveau_fence_update(screen, false);
   } while (spins < NOUVEAU_FENCE_MAX_SPINS);

   debug_printf("Wait on fence %u (ack = %u, next = %u) timed out !\n",
                fence->sequence,
                screen->fence.sequence_ack, screen->fence.sequence);

   return false;
}

void
nouveau_fence_next(struct nouveau_screen *screen)
{
   if (screen->fence.current->state < NOUVEAU_FENCE_STATE_EMITTING) {
      if (screen->fence.current->ref > 1)
         nouveau_fence_emit(screen->fence.current);
      else
         return;
   }

   nouveau_fence_ref(NULL, &screen->fence.current);

   nouveau_fence_new(screen, &screen->fence.current);
}

void
nouveau_fence_unref_bo(void *data)
{
   struct nouveau_bo *bo = data;

   nouveau_bo_ref(NULL, &bo);
}

bool
nouveau_fence_work(struct nouveau_fence *fence,
                   void (*func)(void *), void *data)
{
   struct nouveau_fence_work *work;

   if (!fence || fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
      func(data);
      return true;
   }

   work = CALLOC_STRUCT(nouveau_fence_work);
   if (!work)
      return false;
   work->func = func;
   work->data = data;
   list_add(&work->list, &fence->work);
   p_atomic_inc(&fence->work_count);
   if (fence->work_count > 64)
      nouveau_fence_kick(fence);
   return true;
}
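Usage sketch (not part of the file above): a caller that wants to drop its reference to a buffer object only after the GPU has passed the current fence can queue the unref as fence work via nouveau_fence_work() and nouveau_fence_unref_bo(). The function name example_release_bo_when_idle and the nv_screen/bo parameters below are hypothetical; the fence and nouveau_bo_ref() calls are the ones defined or used in this file.

   /* Illustrative sketch, assuming the nouveau_screen/nouveau_bo types from
    * this driver; not part of nouveau_fence.c.
    */
   static void
   example_release_bo_when_idle(struct nouveau_screen *nv_screen,
                                struct nouveau_bo *bo)
   {
      /* Take a reference that the fence callback will drop later. */
      struct nouveau_bo *ref = NULL;
      nouveau_bo_ref(bo, &ref);

      /* If the current fence is already signalled, nouveau_fence_work() runs
       * the callback immediately; otherwise the callback is queued on the
       * fence's work list and executed from nouveau_fence_trigger_work()
       * once nouveau_fence_update() marks the fence signalled.
       */
      if (!nouveau_fence_work(nv_screen->fence.current,
                              nouveau_fence_unref_bo, ref))
         nouveau_bo_ref(NULL, &ref); /* work allocation failed, drop it now */
   }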