/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa.  To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/marshal.h"
#include "main/marshal_generated.h"
#include "util/u_atomic.h"
#include "util/u_thread.h"


43b8e80941Smrgstatic void
44b8e80941Smrgglthread_unmarshal_batch(void *job, int thread_index)
45b8e80941Smrg{
46b8e80941Smrg   struct glthread_batch *batch = (struct glthread_batch*)job;
47b8e80941Smrg   struct gl_context *ctx = batch->ctx;
48b8e80941Smrg   size_t pos = 0;
49b8e80941Smrg
50b8e80941Smrg   _glapi_set_dispatch(ctx->CurrentServerDispatch);
51b8e80941Smrg
52b8e80941Smrg   while (pos < batch->used)
53b8e80941Smrg      pos += _mesa_unmarshal_dispatch_cmd(ctx, &batch->buffer[pos]);
54b8e80941Smrg
55b8e80941Smrg   assert(pos == batch->used);
56b8e80941Smrg   batch->used = 0;
57b8e80941Smrg}
58b8e80941Smrg
59b8e80941Smrgstatic void
60b8e80941Smrgglthread_thread_initialization(void *job, int thread_index)
61b8e80941Smrg{
62b8e80941Smrg   struct gl_context *ctx = (struct gl_context*)job;
63b8e80941Smrg
64b8e80941Smrg   ctx->Driver.SetBackgroundContext(ctx, &ctx->GLThread->stats);
65b8e80941Smrg   _glapi_set_context(ctx);
66b8e80941Smrg}
67b8e80941Smrg
68b8e80941Smrgvoid
69b8e80941Smrg_mesa_glthread_init(struct gl_context *ctx)
70b8e80941Smrg{
71b8e80941Smrg   struct glthread_state *glthread = calloc(1, sizeof(*glthread));
72b8e80941Smrg
73b8e80941Smrg   if (!glthread)
74b8e80941Smrg      return;
75b8e80941Smrg
76b8e80941Smrg   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
77b8e80941Smrg                        1, 0)) {
78b8e80941Smrg      free(glthread);
79b8e80941Smrg      return;
80b8e80941Smrg   }
81b8e80941Smrg
82b8e80941Smrg   ctx->MarshalExec = _mesa_create_marshal_table(ctx);
83b8e80941Smrg   if (!ctx->MarshalExec) {
84b8e80941Smrg      util_queue_destroy(&glthread->queue);
85b8e80941Smrg      free(glthread);
86b8e80941Smrg      return;
87b8e80941Smrg   }
88b8e80941Smrg
89b8e80941Smrg   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++) {
90b8e80941Smrg      glthread->batches[i].ctx = ctx;
91b8e80941Smrg      util_queue_fence_init(&glthread->batches[i].fence);
92b8e80941Smrg   }
93b8e80941Smrg
94b8e80941Smrg   glthread->stats.queue = &glthread->queue;
95b8e80941Smrg   ctx->CurrentClientDispatch = ctx->MarshalExec;
96b8e80941Smrg   ctx->GLThread = glthread;
97b8e80941Smrg
98b8e80941Smrg   /* Execute the thread initialization function in the thread. */
99b8e80941Smrg   struct util_queue_fence fence;
100b8e80941Smrg   util_queue_fence_init(&fence);
101b8e80941Smrg   util_queue_add_job(&glthread->queue, ctx, &fence,
102b8e80941Smrg                      glthread_thread_initialization, NULL);
103b8e80941Smrg   util_queue_fence_wait(&fence);
104b8e80941Smrg   util_queue_fence_destroy(&fence);
105b8e80941Smrg}
106b8e80941Smrg
107b8e80941Smrgvoid
108b8e80941Smrg_mesa_glthread_destroy(struct gl_context *ctx)
109b8e80941Smrg{
110b8e80941Smrg   struct glthread_state *glthread = ctx->GLThread;
111b8e80941Smrg
112b8e80941Smrg   if (!glthread)
113b8e80941Smrg      return;
114b8e80941Smrg
115b8e80941Smrg   _mesa_glthread_finish(ctx);
116b8e80941Smrg   util_queue_destroy(&glthread->queue);
117b8e80941Smrg
118b8e80941Smrg   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++)
119b8e80941Smrg      util_queue_fence_destroy(&glthread->batches[i].fence);
120b8e80941Smrg
121b8e80941Smrg   free(glthread);
122b8e80941Smrg   ctx->GLThread = NULL;
123b8e80941Smrg
124b8e80941Smrg   _mesa_glthread_restore_dispatch(ctx, "destroy");
125b8e80941Smrg}
126b8e80941Smrg
127b8e80941Smrgvoid
128b8e80941Smrg_mesa_glthread_restore_dispatch(struct gl_context *ctx, const char *func)
129b8e80941Smrg{
130b8e80941Smrg   /* Remove ourselves from the dispatch table except if another ctx/thread
131b8e80941Smrg    * already installed a new dispatch table.
132b8e80941Smrg    *
133b8e80941Smrg    * Typically glxMakeCurrent will bind a new context (install new table) then
134b8e80941Smrg    * old context might be deleted.
135b8e80941Smrg    */
136b8e80941Smrg   if (_glapi_get_dispatch() == ctx->MarshalExec) {
137b8e80941Smrg       ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
138b8e80941Smrg       _glapi_set_dispatch(ctx->CurrentClientDispatch);
139b8e80941Smrg#if 0
140b8e80941Smrg       printf("glthread disabled: %s\n", func);
141b8e80941Smrg#endif
142b8e80941Smrg   }
143b8e80941Smrg}
144b8e80941Smrg
145b8e80941Smrgvoid
146b8e80941Smrg_mesa_glthread_flush_batch(struct gl_context *ctx)
147b8e80941Smrg{
148b8e80941Smrg   struct glthread_state *glthread = ctx->GLThread;
149b8e80941Smrg   if (!glthread)
150b8e80941Smrg      return;
151b8e80941Smrg
152b8e80941Smrg   struct glthread_batch *next = &glthread->batches[glthread->next];
153b8e80941Smrg   if (!next->used)
154b8e80941Smrg      return;
155b8e80941Smrg
156b8e80941Smrg   /* Debug: execute the batch immediately from this thread.
157b8e80941Smrg    *
158b8e80941Smrg    * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
159b8e80941Smrg    * need to restore it when it returns.
160b8e80941Smrg    */
161b8e80941Smrg   if (false) {
162b8e80941Smrg      glthread_unmarshal_batch(next, 0);
163b8e80941Smrg      _glapi_set_dispatch(ctx->CurrentClientDispatch);
164b8e80941Smrg      return;
165b8e80941Smrg   }
166b8e80941Smrg
167b8e80941Smrg   p_atomic_add(&glthread->stats.num_offloaded_items, next->used);
168b8e80941Smrg
169b8e80941Smrg   util_queue_add_job(&glthread->queue, next, &next->fence,
170b8e80941Smrg                      glthread_unmarshal_batch, NULL);
171b8e80941Smrg   glthread->last = glthread->next;
172b8e80941Smrg   glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
173b8e80941Smrg}
174b8e80941Smrg
175b8e80941Smrg/**
176b8e80941Smrg * Waits for all pending batches have been unmarshaled.
177b8e80941Smrg *
178b8e80941Smrg * This can be used by the main thread to synchronize access to the context,
179b8e80941Smrg * since the worker thread will be idle after this.
180b8e80941Smrg */
181b8e80941Smrgvoid
182b8e80941Smrg_mesa_glthread_finish(struct gl_context *ctx)
183b8e80941Smrg{
184b8e80941Smrg   struct glthread_state *glthread = ctx->GLThread;
185b8e80941Smrg   if (!glthread)
186b8e80941Smrg      return;
187b8e80941Smrg
188b8e80941Smrg   /* If this is called from the worker thread, then we've hit a path that
189b8e80941Smrg    * might be called from either the main thread or the worker (such as some
190b8e80941Smrg    * dri interface entrypoints), in which case we don't need to actually
191b8e80941Smrg    * synchronize against ourself.
192b8e80941Smrg    */
193b8e80941Smrg   if (u_thread_is_self(glthread->queue.threads[0]))
194b8e80941Smrg      return;
195b8e80941Smrg
196b8e80941Smrg   struct glthread_batch *last = &glthread->batches[glthread->last];
197b8e80941Smrg   struct glthread_batch *next = &glthread->batches[glthread->next];
198b8e80941Smrg   bool synced = false;
199b8e80941Smrg
200b8e80941Smrg   if (!util_queue_fence_is_signalled(&last->fence)) {
201b8e80941Smrg      util_queue_fence_wait(&last->fence);
202b8e80941Smrg      synced = true;
203b8e80941Smrg   }
204b8e80941Smrg
205b8e80941Smrg   if (next->used) {
206b8e80941Smrg      p_atomic_add(&glthread->stats.num_direct_items, next->used);
207b8e80941Smrg
208b8e80941Smrg      /* Since glthread_unmarshal_batch changes the dispatch to direct,
209b8e80941Smrg       * restore it after it's done.
210b8e80941Smrg       */
211b8e80941Smrg      struct _glapi_table *dispatch = _glapi_get_dispatch();
212b8e80941Smrg      glthread_unmarshal_batch(next, 0);
213b8e80941Smrg      _glapi_set_dispatch(dispatch);
214b8e80941Smrg
215b8e80941Smrg      /* It's not a sync because we don't enqueue partial batches, but
216b8e80941Smrg       * it would be a sync if we did. So count it anyway.
217b8e80941Smrg       */
218b8e80941Smrg      synced = true;
219b8e80941Smrg   }
220b8e80941Smrg
221b8e80941Smrg   if (synced)
222b8e80941Smrg      p_atomic_inc(&glthread->stats.num_syncs);
223b8e80941Smrg}
224