/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
227ec681f3Smrg */ 237ec681f3Smrg 247ec681f3Smrg#include "zink_batch.h" 257ec681f3Smrg#include "zink_context.h" 267ec681f3Smrg#include "zink_fence.h" 277ec681f3Smrg 287ec681f3Smrg#include "zink_resource.h" 297ec681f3Smrg#include "zink_screen.h" 307ec681f3Smrg 317ec681f3Smrg#include "util/set.h" 327ec681f3Smrg#include "util/u_memory.h" 337ec681f3Smrg 347ec681f3Smrgstatic void 357ec681f3Smrgdestroy_fence(struct zink_screen *screen, struct zink_tc_fence *mfence) 367ec681f3Smrg{ 377ec681f3Smrg mfence->fence = NULL; 387ec681f3Smrg tc_unflushed_batch_token_reference(&mfence->tc_token, NULL); 397ec681f3Smrg FREE(mfence); 407ec681f3Smrg} 417ec681f3Smrg 427ec681f3Smrgstruct zink_tc_fence * 437ec681f3Smrgzink_create_tc_fence(void) 447ec681f3Smrg{ 457ec681f3Smrg struct zink_tc_fence *mfence = CALLOC_STRUCT(zink_tc_fence); 467ec681f3Smrg if (!mfence) 477ec681f3Smrg return NULL; 487ec681f3Smrg pipe_reference_init(&mfence->reference, 1); 497ec681f3Smrg util_queue_fence_init(&mfence->ready); 507ec681f3Smrg return mfence; 517ec681f3Smrg} 527ec681f3Smrg 537ec681f3Smrgstruct pipe_fence_handle * 547ec681f3Smrgzink_create_tc_fence_for_tc(struct pipe_context *pctx, struct tc_unflushed_batch_token *tc_token) 557ec681f3Smrg{ 567ec681f3Smrg struct zink_tc_fence *mfence = zink_create_tc_fence(); 577ec681f3Smrg if (!mfence) 587ec681f3Smrg return NULL; 597ec681f3Smrg util_queue_fence_reset(&mfence->ready); 607ec681f3Smrg tc_unflushed_batch_token_reference(&mfence->tc_token, tc_token); 617ec681f3Smrg return (struct pipe_fence_handle*)mfence; 627ec681f3Smrg} 637ec681f3Smrg 647ec681f3Smrgvoid 657ec681f3Smrgzink_fence_reference(struct zink_screen *screen, 667ec681f3Smrg struct zink_tc_fence **ptr, 677ec681f3Smrg struct zink_tc_fence *mfence) 687ec681f3Smrg{ 697ec681f3Smrg if (pipe_reference(&(*ptr)->reference, &mfence->reference)) 707ec681f3Smrg destroy_fence(screen, *ptr); 717ec681f3Smrg 727ec681f3Smrg *ptr = mfence; 737ec681f3Smrg} 747ec681f3Smrg 757ec681f3Smrgstatic void 
767ec681f3Smrgfence_reference(struct pipe_screen *pscreen, 777ec681f3Smrg struct pipe_fence_handle **pptr, 787ec681f3Smrg struct pipe_fence_handle *pfence) 797ec681f3Smrg{ 807ec681f3Smrg zink_fence_reference(zink_screen(pscreen), (struct zink_tc_fence **)pptr, 817ec681f3Smrg zink_tc_fence(pfence)); 827ec681f3Smrg} 837ec681f3Smrg 847ec681f3Smrgstatic bool 857ec681f3Smrgtc_fence_finish(struct zink_context *ctx, struct zink_tc_fence *mfence, uint64_t *timeout_ns) 867ec681f3Smrg{ 877ec681f3Smrg if (!util_queue_fence_is_signalled(&mfence->ready)) { 887ec681f3Smrg int64_t abs_timeout = os_time_get_absolute_timeout(*timeout_ns); 897ec681f3Smrg if (mfence->tc_token) { 907ec681f3Smrg /* Ensure that zink_flush will be called for 917ec681f3Smrg * this mfence, but only if we're in the API thread 927ec681f3Smrg * where the context is current. 937ec681f3Smrg * 947ec681f3Smrg * Note that the batch containing the flush may already 957ec681f3Smrg * be in flight in the driver thread, so the mfence 967ec681f3Smrg * may not be ready yet when this call returns. 977ec681f3Smrg */ 987ec681f3Smrg threaded_context_flush(&ctx->base, mfence->tc_token, *timeout_ns == 0); 997ec681f3Smrg } 1007ec681f3Smrg 1017ec681f3Smrg /* this is a tc mfence, so we're just waiting on the queue mfence to complete 1027ec681f3Smrg * after being signaled by the real mfence 1037ec681f3Smrg */ 1047ec681f3Smrg if (*timeout_ns == PIPE_TIMEOUT_INFINITE) { 1057ec681f3Smrg util_queue_fence_wait(&mfence->ready); 1067ec681f3Smrg } else { 1077ec681f3Smrg if (!util_queue_fence_wait_timeout(&mfence->ready, abs_timeout)) 1087ec681f3Smrg return false; 1097ec681f3Smrg } 1107ec681f3Smrg if (*timeout_ns && *timeout_ns != PIPE_TIMEOUT_INFINITE) { 1117ec681f3Smrg int64_t time_ns = os_time_get_nano(); 1127ec681f3Smrg *timeout_ns = abs_timeout > time_ns ? 
abs_timeout - time_ns : 0; 1137ec681f3Smrg } 1147ec681f3Smrg } 1157ec681f3Smrg 1167ec681f3Smrg return true; 1177ec681f3Smrg} 1187ec681f3Smrg 1197ec681f3Smrgbool 1207ec681f3Smrgzink_vkfence_wait(struct zink_screen *screen, struct zink_fence *fence, uint64_t timeout_ns) 1217ec681f3Smrg{ 1227ec681f3Smrg if (screen->device_lost) 1237ec681f3Smrg return true; 1247ec681f3Smrg if (p_atomic_read(&fence->completed)) 1257ec681f3Smrg return true; 1267ec681f3Smrg 1277ec681f3Smrg assert(fence->batch_id); 1287ec681f3Smrg assert(fence->submitted); 1297ec681f3Smrg 1307ec681f3Smrg bool success = false; 1317ec681f3Smrg 1327ec681f3Smrg VkResult ret; 1337ec681f3Smrg if (timeout_ns) 1347ec681f3Smrg ret = VKSCR(WaitForFences)(screen->dev, 1, &fence->fence, VK_TRUE, timeout_ns); 1357ec681f3Smrg else 1367ec681f3Smrg ret = VKSCR(GetFenceStatus)(screen->dev, fence->fence); 1377ec681f3Smrg success = zink_screen_handle_vkresult(screen, ret); 1387ec681f3Smrg 1397ec681f3Smrg if (success) { 1407ec681f3Smrg p_atomic_set(&fence->completed, true); 1417ec681f3Smrg zink_batch_state(fence)->usage.usage = 0; 1427ec681f3Smrg zink_screen_update_last_finished(screen, fence->batch_id); 1437ec681f3Smrg } 1447ec681f3Smrg return success; 1457ec681f3Smrg} 1467ec681f3Smrg 1477ec681f3Smrgstatic bool 1487ec681f3Smrgzink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct zink_tc_fence *mfence, 1497ec681f3Smrg uint64_t timeout_ns) 1507ec681f3Smrg{ 1517ec681f3Smrg pctx = threaded_context_unwrap_sync(pctx); 1527ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 1537ec681f3Smrg 1547ec681f3Smrg if (screen->device_lost) 1557ec681f3Smrg return true; 1567ec681f3Smrg 1577ec681f3Smrg if (pctx && mfence->deferred_ctx == pctx) { 1587ec681f3Smrg if (mfence->fence == ctx->deferred_fence) { 1597ec681f3Smrg zink_context(pctx)->batch.has_work = true; 1607ec681f3Smrg /* this must be the current batch */ 1617ec681f3Smrg pctx->flush(pctx, NULL, !timeout_ns ? 
PIPE_FLUSH_ASYNC : 0); 1627ec681f3Smrg if (!timeout_ns) 1637ec681f3Smrg return false; 1647ec681f3Smrg } 1657ec681f3Smrg } 1667ec681f3Smrg 1677ec681f3Smrg /* need to ensure the tc mfence has been flushed before we wait */ 1687ec681f3Smrg bool tc_finish = tc_fence_finish(ctx, mfence, &timeout_ns); 1697ec681f3Smrg /* the submit thread hasn't finished yet */ 1707ec681f3Smrg if (!tc_finish) 1717ec681f3Smrg return false; 1727ec681f3Smrg /* this was an invalid flush, just return completed */ 1737ec681f3Smrg if (!mfence->fence) 1747ec681f3Smrg return true; 1757ec681f3Smrg 1767ec681f3Smrg struct zink_fence *fence = mfence->fence; 1777ec681f3Smrg 1787ec681f3Smrg unsigned submit_diff = zink_batch_state(mfence->fence)->submit_count - mfence->submit_count; 1797ec681f3Smrg /* this batch is known to have finished because it has been submitted more than 1 time 1807ec681f3Smrg * since the tc fence last saw it 1817ec681f3Smrg */ 1827ec681f3Smrg if (submit_diff > 1) 1837ec681f3Smrg return true; 1847ec681f3Smrg 1857ec681f3Smrg if (fence->submitted && zink_screen_check_last_finished(screen, fence->batch_id)) 1867ec681f3Smrg return true; 1877ec681f3Smrg 1887ec681f3Smrg return zink_vkfence_wait(screen, fence, timeout_ns); 1897ec681f3Smrg} 1907ec681f3Smrg 1917ec681f3Smrgstatic bool 1927ec681f3Smrgfence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx, 1937ec681f3Smrg struct pipe_fence_handle *pfence, uint64_t timeout_ns) 1947ec681f3Smrg{ 1957ec681f3Smrg return zink_fence_finish(zink_screen(pscreen), pctx, zink_tc_fence(pfence), 1967ec681f3Smrg timeout_ns); 1977ec681f3Smrg} 1987ec681f3Smrg 1997ec681f3Smrgvoid 2007ec681f3Smrgzink_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *pfence) 2017ec681f3Smrg{ 2027ec681f3Smrg struct zink_tc_fence *mfence = zink_tc_fence(pfence); 2037ec681f3Smrg 2047ec681f3Smrg if (mfence->deferred_ctx == pctx) 2057ec681f3Smrg return; 2067ec681f3Smrg 2077ec681f3Smrg if (mfence->deferred_ctx) { 2087ec681f3Smrg 
zink_context(pctx)->batch.has_work = true; 2097ec681f3Smrg /* this must be the current batch */ 2107ec681f3Smrg pctx->flush(pctx, NULL, 0); 2117ec681f3Smrg } 2127ec681f3Smrg zink_fence_finish(zink_screen(pctx->screen), pctx, mfence, PIPE_TIMEOUT_INFINITE); 2137ec681f3Smrg} 2147ec681f3Smrg 2157ec681f3Smrgvoid 2167ec681f3Smrgzink_screen_fence_init(struct pipe_screen *pscreen) 2177ec681f3Smrg{ 2187ec681f3Smrg pscreen->fence_reference = fence_reference; 2197ec681f3Smrg pscreen->fence_finish = fence_finish; 2207ec681f3Smrg} 221