/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "freedreno_context.h"
#include "ir3/ir3_cache.h"
#include "util/u_upload_mgr.h"
#include "freedreno_blitter.h"
#include "freedreno_draw.h"
#include "freedreno_fence.h"
#include "freedreno_gmem.h"
#include "freedreno_program.h"
#include "freedreno_query.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_texture.h"
#include "freedreno_util.h"
#include "util/u_trace_gallium.h"

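/* Implementation of pipe_context::flush.  Flushes the current batch (if
 * any) and, when fencep is non-NULL, returns a reference to a fence that
 * the caller can wait on for the flushed rendering to complete:
 */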
static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
                 unsigned flags) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_fence_handle *fence = NULL;
   struct fd_batch *batch = NULL;

   /* We want to look up the current batch if it exists, but not create a
    * new one if not (unless we need a fence).
    */
   fd_batch_reference(&batch, ctx->batch);

   DBG("%p: flush: flags=%x, fencep=%p", batch, flags, fencep);

   if (fencep && !batch) {
      batch = fd_context_batch(ctx);
   } else if (!batch) {
      if (ctx->screen->reorder)
         fd_bc_flush(ctx, flags & PIPE_FLUSH_DEFERRED);
      fd_bc_dump(ctx, "%p: NULL batch, remaining:\n", ctx);
      return;
   }

   /* With TC_FLUSH_ASYNC, the fence will have been pre-created from
    * the front-end thread, but not yet associated with a batch, because
    * we cannot safely access ctx->batch outside of the driver thread.
    * So instead, replace the existing batch->fence with the one created
    * earlier.
    */
   if ((flags & TC_FLUSH_ASYNC) && fencep) {
      /* We don't currently expect async+flush in the fence-fd case..
       * for that to work properly we'd need TC to tell us in the
       * create_fence callback that it needs an fd.
       */
      assert(!(flags & PIPE_FLUSH_FENCE_FD));

      fd_fence_set_batch(*fencep, batch);
      fd_fence_ref(&batch->fence, *fencep);

      /* If we have nothing to flush, update the pre-created unflushed
       * fence with the current state of the last-fence:
       */
      if (ctx->last_fence) {
         fd_fence_repopulate(*fencep, ctx->last_fence);
         fd_fence_ref(&fence, *fencep);
         fd_bc_dump(ctx, "%p: (deferred) reuse last_fence, remaining:\n", ctx);
         goto out;
      }

      /* Async flush is not compatible with deferred flush, since
       * nothing triggers the batch flush which fence_flush() would
       * be waiting for.
       */
      flags &= ~PIPE_FLUSH_DEFERRED;
   } else if (!batch->fence) {
      batch->fence = fd_fence_create(batch);
   }

   /* In some sequence of events, we can end up with a last_fence that is
    * not an "fd" fence, which results in eglDupNativeFenceFDANDROID()
    * errors.
    */
   if ((flags & PIPE_FLUSH_FENCE_FD) && ctx->last_fence &&
       !fd_fence_is_fd(ctx->last_fence))
      fd_fence_ref(&ctx->last_fence, NULL);

   /* If there has been no rendering since the last flush, i.e. the app
    * just decided it needed a fence, re-use the last one:
    */
   if (ctx->last_fence) {
      fd_fence_ref(&fence, ctx->last_fence);
      fd_bc_dump(ctx, "%p: reuse last_fence, remaining:\n", ctx);
      goto out;
   }

   /* Take a ref to the batch's fence (the batch can be unref'd when
    * flushed):
    */
   fd_fence_ref(&fence, batch->fence);

   if (flags & PIPE_FLUSH_FENCE_FD)
      fence->submit_fence.use_fence_fd = true;

   fd_bc_dump(ctx, "%p: flushing %p<%u>, flags=0x%x, pending:\n", ctx,
              batch, batch->seqno, flags);

   /* If we get here, we need to flush for a fence, even if there is
    * no rendering yet:
    */
   batch->needs_flush = true;

   if (!ctx->screen->reorder) {
      fd_batch_flush(batch);
   } else {
      fd_bc_flush(ctx, flags & PIPE_FLUSH_DEFERRED);
   }

   fd_bc_dump(ctx, "%p: remaining:\n", ctx);

out:
   if (fencep)
      fd_fence_ref(fencep, fence);

   fd_fence_ref(&ctx->last_fence, fence);

   fd_fence_ref(&fence, NULL);

   fd_batch_reference(&batch, NULL);

   u_trace_context_process(&ctx->trace_context,
                           !!(flags & PIPE_FLUSH_END_OF_FRAME));
}

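/* Implementation of pipe_context::texture_barrier.  For the framebuffer
 * case we can use a lighter-weight per-generation barrier if the backend
 * provides one; otherwise fall back to a full flush:
 */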
static void
fd_texture_barrier(struct pipe_context *pctx, unsigned flags) in_dt
{
   if (flags == PIPE_TEXTURE_BARRIER_FRAMEBUFFER) {
      struct fd_context *ctx = fd_context(pctx);

      if (ctx->framebuffer_barrier) {
         ctx->framebuffer_barrier(ctx);
         return;
      }
   }

   /* On devices that could sample from GMEM we could possibly do better.
    * Or if we knew that we were doing GMEM bypass we could just emit a
    * cache flush, perhaps?  But we don't know if future draws would cause
    * us to use GMEM, and a flush in bypass isn't the end of the world.
    */
   fd_context_flush(pctx, NULL, 0);
}

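/* Implementation of pipe_context::memory_barrier.  Barriers that only
 * involve the PIPE_BARRIER_UPDATE bits need no flush; everything else
 * currently takes the big-hammer path of flushing the context:
 */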
static void
fd_memory_barrier(struct pipe_context *pctx, unsigned flags)
{
   if (!(flags & ~PIPE_BARRIER_UPDATE))
      return;

   fd_context_flush(pctx, NULL, 0);

   /* TODO do we need to check for persistently mapped buffers and
    * fd_bo_cpu_prep()??
    */
}

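/* Copy the string payload into the ring a dword at a time; the packet
 * header has already been emitted by the caller:
 */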
static void
emit_string_tail(struct fd_ringbuffer *ring, const char *string, int len)
{
   const uint32_t *buf = (const void *)string;

   while (len >= 4) {
      OUT_RING(ring, *buf);
      buf++;
      len -= 4;
   }

   /* copy remainder bytes without reading past end of input string: */
   if (len > 0) {
      uint32_t w = 0;
      memcpy(&w, buf, len);
      OUT_RING(ring, w);
   }
}

/* for prior to a5xx: */
void
fd_emit_string(struct fd_ringbuffer *ring, const char *string, int len)
{
   /* max packet size is 0x3fff+1 dwords: */
   len = MIN2(len, 0x4000 * 4);

   OUT_PKT3(ring, CP_NOP, align(len, 4) / 4);
   emit_string_tail(ring, string, len);
}

/* for a5xx+ */
void
fd_emit_string5(struct fd_ringbuffer *ring, const char *string, int len)
{
   /* max packet size is 0x3fff dwords: */
   len = MIN2(len, 0x3fff * 4);

   OUT_PKT7(ring, CP_NOP, align(len, 4) / 4);
   emit_string_tail(ring, string, len);
}
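
/* A hypothetical call site, for illustration: given the current batch's
 * draw ring, the marker string lands in the cmdstream as a CP_NOP payload
 * that cffdump can decode:
 *
 *    fd_emit_string5(batch->draw, "frame-start", strlen("frame-start"));
 */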

/**
 * emit marker string as payload of a no-op packet, which can be
 * decoded by cffdump.
 */
static void
fd_emit_string_marker(struct pipe_context *pctx, const char *string,
                      int len) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   DBG("%.*s", len, string);

   if (!ctx->batch)
      return;

   struct fd_batch *batch = fd_context_batch_locked(ctx);

   fd_batch_needs_flush(batch);

   if (ctx->screen->gen >= 5) {
      fd_emit_string5(batch->draw, string, len);
   } else {
      fd_emit_string(batch->draw, string, len);
   }

   fd_batch_unlock_submit(batch);
   fd_batch_reference(&batch, NULL);
}

/**
 * If we have a pending fence_server_sync() (GPU side sync), flush now.
 * The alternative, trying to track this with batch dependencies, gets
 * hairy quickly.
 *
 * Call this before switching to a different batch, to handle this case.
 */
void
fd_context_switch_from(struct fd_context *ctx)
{
   if (ctx->batch && (ctx->batch->in_fence_fd != -1))
      fd_batch_flush(ctx->batch);
}

/**
 * If there is a pending fence-fd that we need to sync on, this will
 * transfer the reference to the next batch we are going to render
 * to.
 */
void
fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch)
{
   if (ctx->in_fence_fd != -1) {
      sync_accumulate("freedreno", &batch->in_fence_fd, ctx->in_fence_fd);
      close(ctx->in_fence_fd);
      ctx->in_fence_fd = -1;
   }
}

/**
 * Return a reference to the current batch, caller must unref.
 */
struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
   struct fd_batch *batch = NULL;

   tc_assert_driver_thread(ctx->tc);

   fd_batch_reference(&batch, ctx->batch);

   if (unlikely(!batch)) {
      batch = fd_batch_from_fb(ctx, &ctx->framebuffer);
      util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
      fd_batch_reference(&ctx->batch, batch);
      fd_context_all_dirty(ctx);
   }
   fd_context_switch_to(ctx, batch);

   return batch;
}

/**
 * Return a locked reference to the current batch.  A batch with emit
 * lock held is protected against flushing while the lock is held.
 * The emit-lock should be acquired before screen-lock.  The emit-lock
 * should be held while emitting cmdstream.
 */
struct fd_batch *
fd_context_batch_locked(struct fd_context *ctx)
{
   struct fd_batch *batch = NULL;

   while (!batch) {
      batch = fd_context_batch(ctx);
      if (!fd_batch_lock_submit(batch)) {
         fd_batch_reference(&batch, NULL);
      }
   }

   return batch;
}

void
fd_context_destroy(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);
   unsigned i;

   DBG("");

   fd_screen_lock(ctx->screen);
   list_del(&ctx->node);
   fd_screen_unlock(ctx->screen);

   fd_fence_ref(&ctx->last_fence, NULL);

   if (ctx->in_fence_fd != -1)
      close(ctx->in_fence_fd);

   for (i = 0; i < ARRAY_SIZE(ctx->pvtmem); i++) {
      if (ctx->pvtmem[i].bo)
         fd_bo_del(ctx->pvtmem[i].bo);
   }

   util_copy_framebuffer_state(&ctx->framebuffer, NULL);
   fd_batch_reference(&ctx->batch, NULL); /* unref current batch */

   /* Make sure nothing in the batch cache references our context any more. */
   fd_bc_flush(ctx, false);

   fd_prog_fini(pctx);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   if (pctx->stream_uploader)
      u_upload_destroy(pctx->stream_uploader);

   for (i = 0; i < ARRAY_SIZE(ctx->clear_rs_state); i++)
      if (ctx->clear_rs_state[i])
         pctx->delete_rasterizer_state(pctx, ctx->clear_rs_state[i]);

   slab_destroy_child(&ctx->transfer_pool);
   slab_destroy_child(&ctx->transfer_pool_unsync);

   for (i = 0; i < ARRAY_SIZE(ctx->vsc_pipe_bo); i++) {
      if (!ctx->vsc_pipe_bo[i])
         break;
      fd_bo_del(ctx->vsc_pipe_bo[i]);
   }

   fd_device_del(ctx->dev);
   fd_pipe_purge(ctx->pipe);
   fd_pipe_del(ctx->pipe);

   simple_mtx_destroy(&ctx->gmem_lock);

   u_trace_context_fini(&ctx->trace_context);

   fd_autotune_fini(&ctx->autotune);

   ir3_cache_destroy(ctx->shader_cache);

   if (FD_DBG(BSTAT) || FD_DBG(MSGS)) {
      mesa_logi(
         "batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_nondraw=%u, "
         "batch_restore=%u\n",
         (uint32_t)ctx->stats.batch_total, (uint32_t)ctx->stats.batch_sysmem,
         (uint32_t)ctx->stats.batch_gmem, (uint32_t)ctx->stats.batch_nondraw,
         (uint32_t)ctx->stats.batch_restore);
   }
}

static void
fd_set_debug_callback(struct pipe_context *pctx,
                      const struct pipe_debug_callback *cb)
{
   struct fd_context *ctx = fd_context(pctx);

   if (cb)
      ctx->debug = *cb;
   else
      memset(&ctx->debug, 0, sizeof(ctx->debug));
}

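/* Query the kernel for the per-context or global GPU fault count; used
 * below to implement pipe_context::get_device_reset_status:
 */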
static uint32_t
fd_get_reset_count(struct fd_context *ctx, bool per_context)
{
   uint64_t val;
   enum fd_param_id param = per_context ? FD_CTX_FAULTS : FD_GLOBAL_FAULTS;
   int ret = fd_pipe_get_param(ctx->pipe, param, &val);
   debug_assert(!ret);
   return val;
}

static enum pipe_reset_status
fd_get_device_reset_status(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);
   int context_faults = fd_get_reset_count(ctx, true);
   int global_faults = fd_get_reset_count(ctx, false);
   enum pipe_reset_status status;

   if (context_faults != ctx->context_reset_count) {
      status = PIPE_GUILTY_CONTEXT_RESET;
   } else if (global_faults != ctx->global_reset_count) {
      status = PIPE_INNOCENT_CONTEXT_RESET;
   } else {
      status = PIPE_NO_RESET;
   }

   ctx->context_reset_count = context_faults;
   ctx->global_reset_count = global_faults;

   return status;
}

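/* u_trace callback to emit a timestamp write into the cmdstream.  If
 * nothing new was emitted since the last timestamp, record the
 * no-timestamp marker instead of emitting a redundant GPU write:
 */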
static void
fd_trace_record_ts(struct u_trace *ut, void *cs, void *timestamps,
                   unsigned idx)
{
   struct fd_batch *batch = container_of(ut, struct fd_batch, trace);
   struct fd_ringbuffer *ring = cs;
   struct pipe_resource *buffer = timestamps;

   if (ring->cur == batch->last_timestamp_cmd) {
      uint64_t *ts = fd_bo_map(fd_resource(buffer)->bo);
      ts[idx] = U_TRACE_NO_TIMESTAMP;
      return;
   }

   unsigned ts_offset = idx * sizeof(uint64_t);
   batch->ctx->record_timestamp(ring, fd_resource(buffer)->bo, ts_offset);
   batch->last_timestamp_cmd = ring->cur;
}

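/* u_trace callback to read back a timestamp recorded by the GPU and
 * convert it to nanoseconds:
 */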
static uint64_t
fd_trace_read_ts(struct u_trace_context *utctx,
                 void *timestamps, unsigned idx, void *flush_data)
{
   struct fd_context *ctx =
      container_of(utctx, struct fd_context, trace_context);
   struct pipe_resource *buffer = timestamps;
   struct fd_bo *ts_bo = fd_resource(buffer)->bo;

   /* Only need to stall on results for the first entry: */
   if (idx == 0) {
      /* Avoid triggering deferred submits from flushing, since that
       * changes the behavior of what we are trying to measure:
       */
      while (fd_bo_cpu_prep(ts_bo, ctx->pipe, FD_BO_PREP_NOSYNC))
         usleep(10000);
      int ret = fd_bo_cpu_prep(ts_bo, ctx->pipe, FD_BO_PREP_READ);
      if (ret)
         return U_TRACE_NO_TIMESTAMP;
   }

   uint64_t *ts = fd_bo_map(ts_bo);

   /* Don't translate the no-timestamp marker: */
   if (ts[idx] == U_TRACE_NO_TIMESTAMP)
      return U_TRACE_NO_TIMESTAMP;

   return ctx->ts_to_ns(ts[idx]);
}

static void
fd_trace_delete_flush_data(struct u_trace_context *utctx, void *flush_data)
{
   /* We don't use flush_data at the moment. */
}

/* TODO we could combine a few of these small buffers (solid_vbuf,
 * blit_texcoord_vbuf, and vsc_size_mem) into a single buffer and
 * save a tiny bit of memory.
 */

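/* Immutable vertex buffer holding the coordinates used by the internal
 * solid-fill (clear) draw path:
 */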
static struct pipe_resource *
create_solid_vertexbuf(struct pipe_context *pctx)
{
   static const float init_shader_const[] = {
      -1.000000, +1.000000, +1.000000, +1.000000, -1.000000, +1.000000,
   };
   struct pipe_resource *prsc =
      pipe_buffer_create(pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                         sizeof(init_shader_const));
   pipe_buffer_write(pctx, prsc, 0, sizeof(init_shader_const),
                     init_shader_const);
   return prsc;
}

static struct pipe_resource *
create_blit_texcoord_vertexbuf(struct pipe_context *pctx)
{
   struct pipe_resource *prsc = pipe_buffer_create(
      pctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_DYNAMIC, 16);
   return prsc;
}

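/* Create the vertex buffers and vertex-element state shared by the
 * internal clear and blit draw paths:
 */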
void
fd_context_setup_common_vbos(struct fd_context *ctx)
{
   struct pipe_context *pctx = &ctx->base;

   ctx->solid_vbuf = create_solid_vertexbuf(pctx);
   ctx->blit_texcoord_vbuf = create_blit_texcoord_vertexbuf(pctx);

   /* setup solid_vbuf_state: */
   ctx->solid_vbuf_state.vtx = pctx->create_vertex_elements_state(
      pctx, 1,
      (struct pipe_vertex_element[]){{
         .vertex_buffer_index = 0,
         .src_offset = 0,
         .src_format = PIPE_FORMAT_R32G32B32_FLOAT,
      }});
   ctx->solid_vbuf_state.vertexbuf.count = 1;
   ctx->solid_vbuf_state.vertexbuf.vb[0].stride = 12;
   ctx->solid_vbuf_state.vertexbuf.vb[0].buffer.resource = ctx->solid_vbuf;

   /* setup blit_vbuf_state: */
   ctx->blit_vbuf_state.vtx = pctx->create_vertex_elements_state(
      pctx, 2,
      (struct pipe_vertex_element[]){
         {
            .vertex_buffer_index = 0,
            .src_offset = 0,
            .src_format = PIPE_FORMAT_R32G32_FLOAT,
         },
         {
            .vertex_buffer_index = 1,
            .src_offset = 0,
            .src_format = PIPE_FORMAT_R32G32B32_FLOAT,
         }});
   ctx->blit_vbuf_state.vertexbuf.count = 2;
   ctx->blit_vbuf_state.vertexbuf.vb[0].stride = 8;
   ctx->blit_vbuf_state.vertexbuf.vb[0].buffer.resource =
      ctx->blit_texcoord_vbuf;
   ctx->blit_vbuf_state.vertexbuf.vb[1].stride = 12;
   ctx->blit_vbuf_state.vertexbuf.vb[1].buffer.resource = ctx->solid_vbuf;
}

void
fd_context_cleanup_common_vbos(struct fd_context *ctx)
{
   struct pipe_context *pctx = &ctx->base;

   pctx->delete_vertex_elements_state(pctx, ctx->solid_vbuf_state.vtx);
   pctx->delete_vertex_elements_state(pctx, ctx->blit_vbuf_state.vtx);

   pipe_resource_reference(&ctx->solid_vbuf, NULL);
   pipe_resource_reference(&ctx->blit_texcoord_vbuf, NULL);
}

struct pipe_context *
fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
                void *priv, unsigned flags)
   disable_thread_safety_analysis
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct pipe_context *pctx;
   unsigned prio = 1;

   /* lower numerical value == higher priority: */
   if (FD_DBG(HIPRIO))
      prio = 0;
   else if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
      prio = 0;
   else if (flags & PIPE_CONTEXT_LOW_PRIORITY)
      prio = 2;

   /* Some of the stats will get printed out at context destroy, so
    * make sure they are collected:
    */
   if (FD_DBG(BSTAT) || FD_DBG(MSGS))
      ctx->stats_users++;

   ctx->screen = screen;
   ctx->pipe = fd_pipe_new2(screen->dev, FD_PIPE_3D, prio);

   ctx->in_fence_fd = -1;

   if (fd_device_version(screen->dev) >= FD_VERSION_ROBUSTNESS) {
      ctx->context_reset_count = fd_get_reset_count(ctx, true);
      ctx->global_reset_count = fd_get_reset_count(ctx, false);
   }

   simple_mtx_init(&ctx->gmem_lock, mtx_plain);

   /* need some sane default in case gallium frontends don't
    * set some state:
    */
   ctx->sample_mask = 0xffff;
   ctx->active_queries = true;

   pctx = &ctx->base;
   pctx->screen = pscreen;
   pctx->priv = priv;
   pctx->flush = fd_context_flush;
   pctx->emit_string_marker = fd_emit_string_marker;
   pctx->set_debug_callback = fd_set_debug_callback;
   pctx->get_device_reset_status = fd_get_device_reset_status;
   pctx->create_fence_fd = fd_create_fence_fd;
   pctx->fence_server_sync = fd_fence_server_sync;
   pctx->fence_server_signal = fd_fence_server_signal;
   pctx->texture_barrier = fd_texture_barrier;
   pctx->memory_barrier = fd_memory_barrier;

   pctx->stream_uploader = u_upload_create_default(pctx);
   if (!pctx->stream_uploader)
      goto fail;
   pctx->const_uploader = pctx->stream_uploader;

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
   slab_create_child(&ctx->transfer_pool_unsync, &screen->transfer_pool);

   fd_draw_init(pctx);
   fd_resource_context_init(pctx);
   fd_query_context_init(pctx);
   fd_texture_init(pctx);
   fd_state_init(pctx);

   ctx->blitter = util_blitter_create(pctx);
   if (!ctx->blitter)
      goto fail;

   list_inithead(&ctx->hw_active_queries);
   list_inithead(&ctx->acc_active_queries);

   fd_screen_lock(ctx->screen);
   ctx->seqno = ++screen->ctx_seqno;
   list_add(&ctx->node, &ctx->screen->context_list);
   fd_screen_unlock(ctx->screen);

   ctx->current_scissor = &ctx->disabled_scissor;

   u_trace_pipe_context_init(&ctx->trace_context, pctx,
                             fd_trace_record_ts,
                             fd_trace_read_ts,
                             fd_trace_delete_flush_data);

   fd_autotune_init(&ctx->autotune, screen->dev);

   return pctx;

fail:
   pctx->destroy(pctx);
   return NULL;
}

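/* Optionally wrap the context in a threaded_context (u_threaded_context),
 * which moves most pipe_context calls off the app thread.  Returns the
 * original context unchanged if threading is not requested or supported:
 */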
struct pipe_context *
fd_context_init_tc(struct pipe_context *pctx, unsigned flags)
{
   struct fd_context *ctx = fd_context(pctx);

   if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
      return pctx;

   /* Clover (compute-only) is unsupported. */
   if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
      return pctx;

   struct pipe_context *tc = threaded_context_create(
      pctx, &ctx->screen->transfer_pool,
      fd_replace_buffer_storage,
      &(struct threaded_context_options){
         .create_fence = fd_fence_create_unflushed,
         .is_resource_busy = fd_resource_busy,
         .unsynchronized_get_device_reset_status = true,
      },
      &ctx->tc);

   if (tc && tc != pctx)
      threaded_context_init_bytes_mapped_limit((struct threaded_context *)tc, 16);

   return tc;
}