/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa. To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */
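
/* To make the batching concrete, here is a hedged sketch of what the
 * generated marshal layer does for a simple call such as glEnable().  The
 * struct layout and helper names below follow the conventions of marshal.h
 * and marshal_generated.c but are illustrative, not copied from this tree:
 *
 *    struct marshal_cmd_Enable
 *    {
 *       struct marshal_cmd_base cmd_base;  // command id + size in the batch
 *       GLenum cap;
 *    };
 *
 *    static void GLAPIENTRY
 *    _mesa_marshal_Enable(GLenum cap)
 *    {
 *       GET_CURRENT_CONTEXT(ctx);
 *       struct marshal_cmd_Enable *cmd =
 *          _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_Enable,
 *                                          sizeof(*cmd));
 *       cmd->cap = cap;   // log the parameters and return immediately
 *    }
 *
 * The application thread only appends such records to the current batch;
 * the worker thread later replays them via _mesa_unmarshal_dispatch_cmd()
 * in glthread_unmarshal_batch() below.
 */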

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/marshal.h"
#include "main/marshal_generated.h"
#include "util/u_atomic.h"
#include "util/u_thread.h"


static void
glthread_unmarshal_batch(void *job, int thread_index)
{
   struct glthread_batch *batch = (struct glthread_batch*)job;
   struct gl_context *ctx = batch->ctx;
   size_t pos = 0;

   _glapi_set_dispatch(ctx->CurrentServerDispatch);

   while (pos < batch->used)
      pos += _mesa_unmarshal_dispatch_cmd(ctx, &batch->buffer[pos]);

   assert(pos == batch->used);
   batch->used = 0;
}

static void
glthread_thread_initialization(void *job, int thread_index)
{
   struct gl_context *ctx = (struct gl_context*)job;

   ctx->Driver.SetBackgroundContext(ctx, &ctx->GLThread->stats);
   _glapi_set_context(ctx);
}

void
_mesa_glthread_init(struct gl_context *ctx)
{
   struct glthread_state *glthread = calloc(1, sizeof(*glthread));

   if (!glthread)
      return;

   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
                        1, 0)) {
      free(glthread);
      return;
   }

   ctx->MarshalExec = _mesa_create_marshal_table(ctx);
   if (!ctx->MarshalExec) {
      util_queue_destroy(&glthread->queue);
      free(glthread);
      return;
   }

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++) {
      glthread->batches[i].ctx = ctx;
      util_queue_fence_init(&glthread->batches[i].fence);
   }

   glthread->stats.queue = &glthread->queue;
   ctx->CurrentClientDispatch = ctx->MarshalExec;
   ctx->GLThread = glthread;

   /* Execute the thread initialization function in the thread. */
   struct util_queue_fence fence;
   util_queue_fence_init(&fence);
   util_queue_add_job(&glthread->queue, ctx, &fence,
                      glthread_thread_initialization, NULL);
   util_queue_fence_wait(&fence);
   util_queue_fence_destroy(&fence);
}

void
_mesa_glthread_destroy(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;

   if (!glthread)
      return;

   _mesa_glthread_finish(ctx);
   util_queue_destroy(&glthread->queue);

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++)
      util_queue_fence_destroy(&glthread->batches[i].fence);

   free(glthread);
   ctx->GLThread = NULL;

   _mesa_glthread_restore_dispatch(ctx, "destroy");
}

void
_mesa_glthread_restore_dispatch(struct gl_context *ctx, const char *func)
{
   /* Remove ourselves from the dispatch table unless another context/thread
    * has already installed a new dispatch table.
    *
    * Typically, glXMakeCurrent binds a new context (installing its dispatch
    * table) before the old context is deleted.
    */
   if (_glapi_get_dispatch() == ctx->MarshalExec) {
      ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
#if 0
      printf("glthread disabled: %s\n", func);
#endif
   }
}

void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;
   if (!glthread)
      return;

   struct glthread_batch *next = &glthread->batches[glthread->next];
   if (!next->used)
      return;

   /* Debug: execute the batch immediately from this thread.
    *
    * Note that glthread_unmarshal_batch() changes the dispatch table, so
    * we'll need to restore it when it returns.
    */
   if (false) {
      glthread_unmarshal_batch(next, 0);
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
      return;
   }

   p_atomic_add(&glthread->stats.num_offloaded_items, next->used);

   util_queue_add_job(&glthread->queue, next, &next->fence,
                      glthread_unmarshal_batch, NULL);
   glthread->last = glthread->next;
   glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
}
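
/* A worked example of the batch ring above (illustrative numbers): if
 * MARSHAL_MAX_BATCHES is 8, the queue is created in _mesa_glthread_init()
 * with room for 6 pending jobs.  That appears to account for all 8 batches:
 * up to 6 queued, 1 being executed by the worker, and 1 ("next") being
 * filled by the application thread.  When the queue is full,
 * util_queue_add_job() blocks until a slot frees up, which throttles an
 * application that runs too far ahead of the worker.
 */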

/**
 * Waits until all pending batches have been unmarshaled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = ctx->GLThread;
   if (!glthread)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * DRI interface entrypoints), in which case we don't need to actually
    * synchronize against ourselves.
    */
   if (u_thread_is_self(glthread->queue.threads[0]))
      return;

   struct glthread_batch *last = &glthread->batches[glthread->last];
   struct glthread_batch *next = &glthread->batches[glthread->next];
   bool synced = false;

   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   if (next->used) {
      p_atomic_add(&glthread->stats.num_direct_items, next->used);

      /* Since glthread_unmarshal_batch() changes the dispatch table to the
       * direct one, restore it after it's done.
       */
      struct _glapi_table *dispatch = _glapi_get_dispatch();
      glthread_unmarshal_batch(next, 0);
      _glapi_set_dispatch(dispatch);

      /* This isn't a sync, because we never enqueue partial batches, but
       * it would be one if we did, so count it anyway.
       */
      synced = true;
   }

   if (synced)
      p_atomic_inc(&glthread->stats.num_syncs);
}
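
/* Putting it together, the expected lifecycle looks roughly like this
 * (a hedged sketch of the caller's side, not code from this file):
 *
 *    _mesa_glthread_init(ctx);      // opt in: installs ctx->MarshalExec
 *    ... application GL calls are logged into batches and flushed ...
 *    _mesa_glthread_finish(ctx);    // whenever a caller must observe
 *                                   // up-to-date context state
 *    _mesa_glthread_destroy(ctx);   // drains the queue, frees everything
 *
 * Synchronous entrypoints (glGet*, glReadPixels, and the like) are expected
 * to reach _mesa_glthread_finish() via the marshal layer before touching
 * context state directly.
 */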