/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "genxml/gen8_pack.h"

#include "util/debug.h"

/** \file anv_batch_chain.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure.  This involves everything required to create and destroy
 * the actual batch buffers as well as link them together and handle
 * relocations and surface state.  It specifically does *not* contain any
 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
 */

/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/

static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
                          const VkAllocationCallbacks *alloc,
                          const struct anv_reloc_list *other_list)
{
   if (other_list) {
      list->num_relocs = other_list->num_relocs;
      list->array_length = other_list->array_length;
   } else {
      list->num_relocs = 0;
      list->array_length = 256;
   }

   list->relocs =
      vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list->reloc_bos =
      vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->reloc_bos == NULL) {
      vk_free(alloc, list->relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   list->deps = _mesa_pointer_set_create(NULL);

   if (!list->deps) {
      vk_free(alloc, list->relocs);
      vk_free(alloc, list->reloc_bos);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   if (other_list) {
      memcpy(list->relocs, other_list->relocs,
             list->array_length * sizeof(*list->relocs));
      memcpy(list->reloc_bos, other_list->reloc_bos,
             list->array_length * sizeof(*list->reloc_bos));
      set_foreach(other_list->deps, entry) {
         _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
      }
   }

   return VK_SUCCESS;
}

VkResult
anv_reloc_list_init(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc)
{
   return anv_reloc_list_init_clone(list, alloc, NULL);
}

void
anv_reloc_list_finish(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc)
{
   vk_free(alloc, list->relocs);
   vk_free(alloc, list->reloc_bos);
   _mesa_set_destroy(list->deps, NULL);
}

static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = list->array_length * 2;
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct drm_i915_gem_relocation_entry *new_relocs =
      vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct anv_bo **new_reloc_bos =
      vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_reloc_bos == NULL) {
      vk_free(alloc, new_relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
   memcpy(new_reloc_bos, list->reloc_bos,
          list->num_relocs * sizeof(*list->reloc_bos));

   vk_free(alloc, list->relocs);
   vk_free(alloc, list->reloc_bos);

   list->array_length = new_length;
   list->relocs = new_relocs;
   list->reloc_bos = new_reloc_bos;

   return VK_SUCCESS;
}

VkResult
anv_reloc_list_add(struct anv_reloc_list *list,
                   const VkAllocationCallbacks *alloc,
                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   if (target_bo->flags & EXEC_OBJECT_PINNED) {
      _mesa_set_add(list->deps, target_bo);
      return VK_SUCCESS;
   }

   VkResult result = anv_reloc_list_grow(list, alloc, 1);
   if (result != VK_SUCCESS)
      return result;

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = 0;
   entry->write_domain = 0;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));

   return VK_SUCCESS;
}

static VkResult
anv_reloc_list_append(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc,
                      struct anv_reloc_list *other, uint32_t offset)
{
   VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
   if (result != VK_SUCCESS)
      return result;

   memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));

   for (uint32_t i = 0; i < other->num_relocs; i++)
      list->relocs[i + list->num_relocs].offset += offset;

   list->num_relocs += other->num_relocs;

   set_foreach(other->deps, entry) {
      _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
   }

   return VK_SUCCESS;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/

void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end) {
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(batch, result);
         return NULL;
      }
   }

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}
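
/* A minimal usage sketch (illustrative only, with hypothetical dword
 * values) of how a caller consumes the space reserved above:
 *
 *    uint32_t *dw = anv_batch_emit_dwords(batch, 2);
 *    if (dw != NULL) {           // NULL means the batch failed to extend
 *       dw[0] = PACKET_HEADER;   // hypothetical command header dword
 *       dw[1] = PACKET_PAYLOAD;  // hypothetical payload dword
 *    }
 *
 * Real callers in this driver go through the anv_batch_emit() macro and
 * the genxml pack functions rather than writing raw dwords like this.
 */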

uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
                                        location - batch->start, bo, delta);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(batch, result);
      return 0;
   }

   return bo->offset + delta;
}

void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end) {
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(batch, result);
         return;
      }
   }

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   offset = batch->next - batch->start;
   VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
                                           other->relocs, offset);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(batch, result);
      return;
   }

   batch->next += size;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/

static VkResult
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
                              ANV_CMD_BUFFER_BATCH_SIZE);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   vk_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static VkResult
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
                   const struct anv_batch_bo *other_bbo,
                   struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
                              other_bbo->bo.size);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
                                      &other_bbo->relocs);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   bbo->length = other_bbo->length;
   memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   vk_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   batch->next = batch->start = bbo->bo.map;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
   bbo->relocs.num_relocs = 0;
   _mesa_set_clear(bbo->relocs.deps, NULL);
}

static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
                      size_t batch_padding)
{
   batch->start = bbo->bo.map;
   batch->next = bbo->bo.map + bbo->length;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
}

static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}

static VkResult
anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
                  struct anv_batch *batch, size_t additional,
                  size_t batch_padding)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;

   size_t new_size = bbo->bo.size;
   while (new_size <= bbo->length + additional + batch_padding)
      new_size *= 2;

   if (new_size == bbo->bo.size)
      return VK_SUCCESS;

   struct anv_bo new_bo;
   VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                                       &new_bo, new_size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(new_bo.map, bbo->bo.map, bbo->length);

   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);

   bbo->bo = new_bo;
   anv_batch_bo_continue(bbo, batch, batch_padding);

   return VK_SUCCESS;
}

static void
anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
                  struct anv_batch_bo *prev_bbo,
                  struct anv_batch_bo *next_bbo,
                  uint32_t next_bbo_offset)
{
   MAYBE_UNUSED const uint32_t bb_start_offset =
      prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
   MAYBE_UNUSED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;

   /* Make sure we're looking at a MI_BATCH_BUFFER_START */
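   /* Bits 31:29 of the header dword are the command type (0 == MI) and
    * bits 28:23 are the MI opcode (0x31 == 49 == MI_BATCH_BUFFER_START).
    */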
   assert(((*bb_start >> 29) & 0x07) == 0);
   assert(((*bb_start >> 23) & 0x3f) == 49);

   if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
      assert(prev_bbo->bo.flags & EXEC_OBJECT_PINNED);
      assert(next_bbo->bo.flags & EXEC_OBJECT_PINNED);

      write_reloc(cmd_buffer->device,
                  prev_bbo->bo.map + bb_start_offset + 4,
                  next_bbo->bo.offset + next_bbo_offset, true);
   } else {
      uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
      assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);

      prev_bbo->relocs.reloc_bos[reloc_idx] = &next_bbo->bo;
      prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;

      /* Use a bogus presumed offset to force a relocation */
      prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
   }
}

static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                     struct anv_cmd_buffer *cmd_buffer)
{
   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
   vk_free(&cmd_buffer->pool->alloc, bbo);
}

static VkResult
anv_batch_bo_list_clone(const struct list_head *list,
                        struct anv_cmd_buffer *cmd_buffer,
                        struct list_head *new_list)
{
   VkResult result = VK_SUCCESS;

   list_inithead(new_list);

   struct anv_batch_bo *prev_bbo = NULL;
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo *new_bbo = NULL;
      result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
      if (result != VK_SUCCESS)
         break;
      list_addtail(&new_bbo->link, new_list);

      if (prev_bbo)
         anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);

      prev_bbo = new_bbo;
   }

   if (result != VK_SUCCESS) {
      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
         anv_batch_bo_destroy(bbo, cmd_buffer);
   }

   return result;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/

static struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
}

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
   return (struct anv_address) {
      .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
      .offset = bt_block->offset,
   };
}

static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   /* In gen8+ the address field grew to two dwords to accommodate 48 bit
    * offsets. The high 16 bits are in the last dword, so we can use the gen8
    * version in either case, as long as we set the instruction length in the
    * header accordingly.  This means that we always emit three dwords here
    * and all the padding and adjustment we do in this file works for all
    * gens.
    */
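
   /* For illustration, the three dwords emitted below look roughly like
    * this on gen8+ (field positions per the public docs; treat the exact
    * layout as an assumption of this sketch):
    *
    *    DW0: command type / MI opcode / address space / dword length
    *    DW1: batch buffer start address, bits 31:2
    *    DW2: batch buffer start address, bits 47:32
    *
    * On gen7, addresses fit in 32 bits, so DW2 is zero; with the header
    * length set to the gen7 value, the hardware presumably parses that
    * extra zero dword as an MI_NOOP, which is harmless.
    */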

#define GEN7_MI_BATCH_BUFFER_START_length      2
#define GEN7_MI_BATCH_BUFFER_START_length_bias      2

   const uint32_t gen7_length =
      GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
   const uint32_t gen8_length =
      GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;

   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
      bbs.DWordLength               = cmd_buffer->device->info.gen < 8 ?
                                      gen7_length : gen8_length;
      bbs.SecondLevelBatchBuffer    = Firstlevelbatch;
      bbs.AddressSpaceIndicator     = ASI_PPGTT;
      bbs.BatchBufferStartAddress   = (struct anv_address) { bo, offset };
   }
}

static void
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_batch_bo *bbo)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_batch_bo *current_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   /* We set the end of the batch a little short so we would be sure we
    * have room for the chaining command.  Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);

   emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);

   anv_batch_bo_finish(current_bbo, batch);
}

static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo;

   VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

static VkResult
anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   /* Propagate the grow result instead of unconditionally reporting
    * success; growing can fail with VK_ERROR_OUT_OF_DEVICE_MEMORY.
    */
   return anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
                            GEN8_MI_BATCH_BUFFER_START_length * 4);
}

/** Allocate a binding table
 *
 * This function allocates a binding table.  This is a bit more complicated
 * than one would think due to a combination of Vulkan driver design and some
 * unfortunate hardware restrictions.
 *
 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
 * the binding table pointer which means that all binding tables need to live
 * in the bottom 64k of surface state base address.  The way the GL driver has
 * classically dealt with this restriction is to emit all surface states
 * on-the-fly into the batch and have a batch buffer smaller than 64k.  This
 * isn't really an option in Vulkan for a couple of reasons:
 *
 *  1) In Vulkan, we have growing (or chaining) batches so surface states have
 *     to live in their own buffer and we have to be able to re-emit
 *     STATE_BASE_ADDRESS as needed which requires a full pipeline stall.  In
 *     order to avoid emitting STATE_BASE_ADDRESS any more often than needed
 *     (it's not that hard to hit 64k of just binding tables), we allocate
 *     surface state objects up-front when VkImageView is created.  In order
 *     for this to work, surface state objects need to be allocated from a
 *     global buffer.
 *
 *  2) We tried to design the surface state system in such a way that it's
 *     already ready for bindless texturing.  The way bindless texturing works
 *     on our hardware is that you have a big pool of surface state objects
 *     (with its own state base address) and the bindless handles are simply
 *     offsets into that pool.  With the architecture we chose, we already
 *     have that pool and it's exactly the same pool that we use for regular
 *     surface states so we should already be ready for bindless.
 *
 *  3) For render targets, we need to be able to fill out the surface states
 *     later in vkBeginRenderPass so that we can assign clear colors
 *     correctly.  One way to do this would be to just create the surface
 *     state data and then repeatedly copy it into the surface state BO every
 *     time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
 *     rather annoying; simply allocating the surface states up-front and
 *     re-using them for the entire render pass is much cleaner.
 *
 * While none of these are technically blockers for emitting state on the fly
 * like we do in GL, the ability to have a single surface state pool greatly
 * simplifies things.  Unfortunately, it comes at a cost...
 *
 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
 * place the binding tables just anywhere in surface state base address.
 * Because 64k isn't a whole lot of space, we can't simply restrict the
 * surface state buffer to 64k, we have to be more clever.  The solution we've
 * chosen is to have a block pool with a maximum size of 2G that starts at
 * zero and grows in both directions.  All surface states are allocated from
 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
 * binding tables from the bottom of the pool (negative offsets).  Every time
 * we allocate a new binding table block, we set surface state base address to
 * point to the bottom of the binding table block.  This way all of the
 * binding tables in the block are in the bottom 64k of surface state base
 * address.  When we fill out the binding table, we add the distance between
 * the bottom of our binding table block and zero of the block pool to the
 * surface state offsets so that they are correct relative to our new surface
 * state base address at the bottom of the binding table block.
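 *
 * As a worked example (with invented numbers): suppose a binding table
 * block was allocated at pool offset -8192, so surface state base address
 * now points at -8192, and a surface state lives at pool offset +1024.
 * The value written into the binding table entry is then
 *
 *    1024 + 8192 = 9216
 *
 * i.e. the surface state offset plus *state_offset (-bt_block->offset in
 * the non-softpin case below), which lands 1024 bytes past the new surface
 * state base address, exactly where the surface state actually lives.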
 *
 * \see adjust_relocations_from_state_pool()
 * \see adjust_relocations_to_state_pool()
 *
 * \param[in]  entries        The number of surface state entries the binding
 *                            table should be able to hold.
 *
 * \param[out] state_offset   The offset from surface state base address
 *                            where the surface states live.  This must be
 *                            added to the surface state offset when it is
 *                            written into the binding table entry.
 *
 * \return                    An anv_state representing the binding table
 */
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_state_pool *state_pool = &device->surface_state_pool;
   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
   struct anv_state state;

   state.alloc_size = align_u32(entries * 4, 32);

   if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
      return (struct anv_state) { 0 };

   state.offset = cmd_buffer->bt_next;
   state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
                                  bt_block->offset + state.offset);

   cmd_buffer->bt_next += state.alloc_size;

   if (device->instance->physicalDevice.use_softpin) {
      assert(bt_block->offset >= 0);
      *state_offset = device->surface_state_pool.block_pool.start_address -
         device->binding_table_pool.block_pool.start_address - bt_block->offset;
   } else {
      assert(bt_block->offset < 0);
      *state_offset = -bt_block->offset;
   }

   return state;
}

struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                 isl_dev->ss.size, isl_dev->ss.align);
}

struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
   if (bt_block == NULL) {
      anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
   cmd_buffer->bt_next = 0;

   return VK_SUCCESS;
}

VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);

   result = anv_batch_bo_create(cmd_buffer, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
   cmd_buffer->batch.user_data = cmd_buffer;

   if (cmd_buffer->device->can_chain_batches) {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
   } else {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
   }

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   int success = u_vector_init(&cmd_buffer->seen_bbos,
                                 sizeof(struct anv_bo *),
                                 8 * sizeof(struct anv_bo *));
   if (!success) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_batch_bo;
   }

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;

   /* u_vector requires power-of-two size elements */
   unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
   success = u_vector_init(&cmd_buffer->bt_block_states,
                           pow2_state_size, 8 * pow2_state_size);
   if (!success) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_seen_bbos;
   }

   result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
                                &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;
   cmd_buffer->last_ss_pool_center = 0;

   result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;

   return VK_SUCCESS;

 fail_bt_blocks:
   u_vector_finish(&cmd_buffer->bt_block_states);
 fail_seen_bbos:
   u_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, cmd_buffer);

   return result;
}

void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block;
   u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
   u_vector_finish(&cmd_buffer->bt_block_states);

   anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);

   u_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
}

void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   /* Delete all but the first batch bo */
   assert(!list_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
   assert(!list_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
      struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
   }
   assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
   cmd_buffer->bt_next = 0;

   cmd_buffer->surface_relocs.num_relocs = 0;
   _mesa_set_clear(cmd_buffer->surface_relocs.deps, NULL);
   cmd_buffer->last_ss_pool_center = 0;

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);
}

void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* When we start a batch buffer, we subtract a certain amount of
       * padding from the end to ensure that we always have room to emit a
       * BATCH_BUFFER_START to chain to the next BO.  We need to remove
       * that padding before we end the batch; otherwise, we may end up
       * with our BATCH_BUFFER_END in another BO.
       */
      cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
      assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);

      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);

      /* Round batch up to an even number of dwords. */
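      /* The batch length is always a multiple of 4 bytes here, so testing
       * bit 2 is the same as asking whether the dword count is odd.
       */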
      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
         anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);

      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
   } else {
      assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
      /* If this is a secondary command buffer, we need to determine the
       * mode in which it will be executed with vkExecuteCommands.  We
       * determine this statically here so that this stays in sync with the
       * actual ExecuteCommands implementation.
       */
      const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
      if (!cmd_buffer->device->can_chain_batches) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
      } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
                 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
         /* If the secondary has exactly one batch buffer in its list *and*
          * that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
          */
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
      } else if (!(cmd_buffer->usage_flags &
                   VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;

         /* In order to chain, we need this command buffer to contain an
          * MI_BATCH_BUFFER_START which will jump back to the calling batch.
          * It doesn't matter where it points now so long as it has a valid
          * relocation.  We'll adjust it later as part of the chaining
          * process.
          *
          * We set the end of the batch a little short so we would be sure we
          * have room for the chaining command.  Since we're about to emit the
          * chaining command, let's set it back where it should go.
          */
         cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
         assert(cmd_buffer->batch.start == batch_bo->bo.map);
         assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);

         emit_batch_buffer_start(cmd_buffer, &batch_bo->bo, 0);
         assert(cmd_buffer->batch.start == batch_bo->bo.map);
      } else {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
      }
   }

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
}

static VkResult
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                             struct list_head *list)
{
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
      if (bbo_ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      *bbo_ptr = bbo;
   }

   return VK_SUCCESS;
}

void
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                             struct anv_cmd_buffer *secondary)
{
   switch (secondary->exec_mode) {
   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
      unsigned length = secondary->batch.end - secondary->batch.start;
      anv_batch_bo_grow(primary, bbo, &primary->batch, length,
                        GEN8_MI_BATCH_BUFFER_START_length * 4);
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      emit_batch_buffer_start(primary, &first_bbo->bo, 0);

      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
      assert(primary->batch.start == this_bbo->bo.map);
      uint32_t offset = primary->batch.next - primary->batch.start;

      /* Make the tail of the secondary point back to right after the
       * MI_BATCH_BUFFER_START in the primary batch.
       */
      anv_batch_bo_link(primary, last_bbo, this_bbo, offset);

      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
      struct list_head copy_list;
      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
                                                secondary,
                                                &copy_list);
      if (result != VK_SUCCESS)
         return; /* FIXME */

      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);

      struct anv_batch_bo *first_bbo =
         list_first_entry(&copy_list, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&copy_list, struct anv_batch_bo, link);

      cmd_buffer_chain_to_batch_bo(primary, first_bbo);

      list_splicetail(&copy_list, &primary->batch_bos);

      anv_batch_bo_continue(last_bbo, &primary->batch,
                            GEN8_MI_BATCH_BUFFER_START_length * 4);
      break;
   }
   default:
      assert(!"Invalid execution mode");
   }

   anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
                         &secondary->surface_relocs, 0);
}

struct anv_execbuf {
   struct drm_i915_gem_execbuffer2           execbuf;

   struct drm_i915_gem_exec_object2 *        objects;
   uint32_t                                  bo_count;
   struct anv_bo **                          bos;

   /* Allocated length of the 'objects' and 'bos' arrays */
   uint32_t                                  array_length;

   bool                                      has_relocs;

   uint32_t                                  fence_count;
   uint32_t                                  fence_array_length;
   struct drm_i915_gem_exec_fence *          fences;
   struct anv_syncobj **                     syncobjs;
};

static void
anv_execbuf_init(struct anv_execbuf *exec)
{
   memset(exec, 0, sizeof(*exec));
}

static void
anv_execbuf_finish(struct anv_execbuf *exec,
                   const VkAllocationCallbacks *alloc)
{
   vk_free(alloc, exec->objects);
   vk_free(alloc, exec->bos);
   vk_free(alloc, exec->fences);
   vk_free(alloc, exec->syncobjs);
}

static int
_compare_bo_handles(const void *_bo1, const void *_bo2)
{
   struct anv_bo * const *bo1 = _bo1;
   struct anv_bo * const *bo2 = _bo2;

   return (*bo1)->gem_handle - (*bo2)->gem_handle;
}

static VkResult
anv_execbuf_add_bo_set(struct anv_execbuf *exec,
                       struct set *deps,
                       uint32_t extra_flags,
                       const VkAllocationCallbacks *alloc);

static VkResult
anv_execbuf_add_bo(struct anv_execbuf *exec,
                   struct anv_bo *bo,
                   struct anv_reloc_list *relocs,
                   uint32_t extra_flags,
                   const VkAllocationCallbacks *alloc)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
      obj = &exec->objects[bo->index];

   if (obj == NULL) {
      /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
      if (exec->bo_count >= exec->array_length) {
         uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            vk_alloc(alloc, new_len * sizeof(*new_objects),
                     8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            vk_alloc(alloc, new_len * sizeof(*new_bos),
                      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (new_bos == NULL) {
            vk_free(alloc, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (exec->objects) {
            memcpy(new_objects, exec->objects,
                   exec->bo_count * sizeof(*new_objects));
            memcpy(new_bos, exec->bos,
                   exec->bo_count * sizeof(*new_bos));
         }

         vk_free(alloc, exec->objects);
         vk_free(alloc, exec->bos);

         exec->objects = new_objects;
         exec->bos = new_bos;
         exec->array_length = new_len;
      }

      assert(exec->bo_count < exec->array_length);

      bo->index = exec->bo_count++;
      obj = &exec->objects[bo->index];
      exec->bos[bo->index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->alignment = 0;
      obj->offset = bo->offset;
      obj->flags = (bo->flags & ~ANV_BO_FLAG_MASK) | extra_flags;
      obj->rsvd1 = 0;
      obj->rsvd2 = 0;
   }

   if (relocs != NULL) {
      assert(obj->relocation_count == 0);

      if (relocs->num_relocs > 0) {
         /* This is the first time we've ever seen a list of relocations for
          * this BO.  Go ahead and set the relocations and then walk the list
          * of relocations and add them all.
          */
         exec->has_relocs = true;
         obj->relocation_count = relocs->num_relocs;
         obj->relocs_ptr = (uintptr_t) relocs->relocs;

         for (size_t i = 0; i < relocs->num_relocs; i++) {
            VkResult result;

            /* A quick sanity check on relocations */
            assert(relocs->relocs[i].offset < bo->size);
            result = anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL,
                                        extra_flags, alloc);

            if (result != VK_SUCCESS)
               return result;
         }
      }

      return anv_execbuf_add_bo_set(exec, relocs->deps, extra_flags, alloc);
   }

   return VK_SUCCESS;
}

/* Add BO dependencies to execbuf */
static VkResult
anv_execbuf_add_bo_set(struct anv_execbuf *exec,
                       struct set *deps,
                       uint32_t extra_flags,
                       const VkAllocationCallbacks *alloc)
{
   if (!deps || deps->entries <= 0)
      return VK_SUCCESS;

   const uint32_t entries = deps->entries;
   struct anv_bo **bos =
      vk_alloc(alloc, entries * sizeof(*bos),
               8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (bos == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct anv_bo **bo = bos;
   set_foreach(deps, entry) {
      *bo++ = (void *)entry->key;
   }

   qsort(bos, entries, sizeof(struct anv_bo *), _compare_bo_handles);

   VkResult result = VK_SUCCESS;
   for (bo = bos; bo < bos + entries; bo++) {
      result = anv_execbuf_add_bo(exec, *bo, NULL, extra_flags, alloc);
      if (result != VK_SUCCESS)
         break;
   }

   vk_free(alloc, bos);

   return result;
}

static VkResult
anv_execbuf_add_syncobj(struct anv_execbuf *exec,
                        uint32_t handle, uint32_t flags,
                        const VkAllocationCallbacks *alloc)
{
   assert(flags != 0);

   if (exec->fence_count >= exec->fence_array_length) {
      uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);

      exec->fences = vk_realloc(alloc, exec->fences,
                                new_len * sizeof(*exec->fences),
                                8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (exec->fences == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      exec->fence_array_length = new_len;
   }

   exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
      .handle = handle,
      .flags = flags,
   };

   exec->fence_count++;

   return VK_SUCCESS;
}

static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   for (size_t i = 0; i < list->num_relocs; i++)
      list->relocs[i].target_handle = list->reloc_bos[i]->index;
}

static void
adjust_relocations_from_state_pool(struct anv_state_pool *pool,
                                   struct anv_reloc_list *relocs,
                                   uint32_t last_pool_center_bo_offset)
{
   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
   uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;

   for (size_t i = 0; i < relocs->num_relocs; i++) {
      /* All of the relocations from this block pool to other BO's should
       * have been emitted relative to the surface block pool center.  We
       * need to add the center offset to make them relative to the
       * beginning of the actual GEM bo.
       */
      relocs->relocs[i].offset += delta;
   }
}

static void
adjust_relocations_to_state_pool(struct anv_state_pool *pool,
                                 struct anv_bo *from_bo,
                                 struct anv_reloc_list *relocs,
                                 uint32_t last_pool_center_bo_offset)
{
   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
   uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;

   /* When we initially emit relocations into a block pool, we don't
    * actually know what the final center_bo_offset will be so we just emit
    * it as if center_bo_offset == 0.  Now that we know what the center
    * offset is, we need to walk the list of relocations and adjust any
    * relocations that point to the pool bo with the correct offset.
    */
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      if (relocs->reloc_bos[i] == pool->block_pool.bo) {
         /* Adjust the delta value in the relocation to correctly
          * correspond to the new delta.  Initially, this value may have
          * been negative (if treated as unsigned), but we trust in
          * uint32_t roll-over to fix that for us at this point.
          */
         relocs->relocs[i].delta += delta;

         /* Since the delta has changed, we need to update the actual
          * relocated value with the new presumed value.  This function
          * should only be called on batch buffers, so we know it isn't in
          * use by the GPU at the moment.
          */
         assert(relocs->relocs[i].offset < from_bo->size);
         write_reloc(pool->block_pool.device,
                     from_bo->map + relocs->relocs[i].offset,
                     relocs->relocs[i].presumed_offset +
                     relocs->relocs[i].delta, false);
      }
   }
}
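
/* A small worked example of the two adjustments above (numbers invented
 * for illustration): suppose a command buffer last executed when
 * center_bo_offset was 0 and the pool has since grown downward so that
 * center_bo_offset is now 4096; then delta == 4096.  Relocations *from*
 * the state pool get their 'offset' field (the location of the relocation
 * within the pool BO) bumped by 4096 because the recorded locations moved
 * within the GEM BO.  Relocations *to* the state pool get their 'delta'
 * field bumped by 4096 because their targets moved within the BO; a delta
 * recorded as -2048 (stored as an unsigned wrap-around) becomes +2048,
 * relying on well-defined uint32_t overflow.
 */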

static void
anv_reloc_list_apply(struct anv_device *device,
                     struct anv_reloc_list *list,
                     struct anv_bo *bo,
                     bool always_relocate)
{
   for (size_t i = 0; i < list->num_relocs; i++) {
      struct anv_bo *target_bo = list->reloc_bos[i];
      if (list->relocs[i].presumed_offset == target_bo->offset &&
          !always_relocate)
         continue;

      void *p = bo->map + list->relocs[i].offset;
      write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
      list->relocs[i].presumed_offset = target_bo->offset;
   }
}

/**
 * This function applies the relocations for a command buffer and writes the
 * actual addresses into the buffers as per what we were told by the kernel on
 * the previous execbuf2 call.  This should be safe to do because, for each
 * relocated address, we have two cases:
 *
 *  1) The target BO is inactive (as seen by the kernel).  In this case, it is
 *     not in use by the GPU so updating the address is 100% ok.  It won't be
 *     in-use by the GPU (from our context) again until the next execbuf2
 *     happens.  If the kernel decides to move it in the next execbuf2, it
 *     will have to do the relocations itself, but that's ok because it should
 *     have all of the information needed to do so.
 *
 *  2) The target BO is active (as seen by the kernel).  In this case, it
 *     hasn't moved since the last execbuffer2 call because GTT shuffling
 *     *only* happens when the BO is idle.  From our perspective, it only
 *     happens inside the execbuffer2 ioctl, but the shuffling may be
 *     triggered by another ioctl or by memory pressure; with full-ppgtt,
 *     it is limited to execbuffer2 ioctls on the same context.  Since the
 *     target BO hasn't moved, our anv_bo::offset exactly matches the BO's
 *     GTT address and the relocated value we are writing into the BO will
 *     be the same as the value that is already there.
 *
 *     There is also a possibility that the target BO is active but the exact
 *     RENDER_SURFACE_STATE object we are writing the relocation into isn't in
 *     use.  In this case, the address currently in the RENDER_SURFACE_STATE
 *     may be stale but it's still safe to write the relocation because that
 *     particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
 *     won't be until the next execbuf2 call.
 *
 * By doing relocations on the CPU, we can tell the kernel that it doesn't
 * need to bother.  We want to do this because the surface state buffer is
 * used by every command buffer so, if the kernel does the relocations, it
 * will always be busy and the kernel will always stall.  This is also
 * probably the fastest mechanism for doing relocations since the kernel would
 * have to make a full copy of all the relocations lists.
 */
static bool
relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_execbuf *exec)
{
   if (!exec->has_relocs)
      return true;

   static int userspace_relocs = -1;
   if (userspace_relocs < 0)
      userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
   if (!userspace_relocs)
      return false;

   /* First, we have to check to see whether or not we can even do the
    * relocation.  New buffers which have never been submitted to the kernel
    * don't have a valid offset so we need to let the kernel do relocations so
    * that we can get offsets for them.  On future execbuf2 calls, those
    * buffers will have offsets and we will be able to skip relocating.
    * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
    */
   for (uint32_t i = 0; i < exec->bo_count; i++) {
      if (exec->bos[i]->offset == (uint64_t)-1)
         return false;
   }

   /* Since surface states are shared between command buffers and we don't
    * know what order they will be submitted to the kernel, we don't know
    * what address is actually written in the surface state object at any
    * given time.  The only option is to always relocate them.
    */
   anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
                        cmd_buffer->device->surface_state_pool.block_pool.bo,
                        true /* always relocate surface states */);

   /* Since we own all of the batch buffers, we know what values are stored
    * in the relocated addresses and only have to update them if the offsets
    * have changed.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      anv_reloc_list_apply(cmd_buffer->device,
                           &(*bbo)->relocs, &(*bbo)->bo, false);
   }

   for (uint32_t i = 0; i < exec->bo_count; i++)
      exec->objects[i].offset = exec->bos[i]->offset;

   return true;
}

static VkResult
setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
                             struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_state_pool *ss_pool =
      &cmd_buffer->device->surface_state_pool;

   adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
                                      cmd_buffer->last_ss_pool_center);
   VkResult result;
   struct anv_bo *bo;
   if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
      anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
                                     &cmd_buffer->device->alloc);
         if (result != VK_SUCCESS)
            return result;
      }
      /* Add surface dependencies (BOs) to the execbuf */
      anv_execbuf_add_bo_set(execbuf, cmd_buffer->surface_relocs.deps, 0,
                             &cmd_buffer->device->alloc);

      /* Add the BOs for all memory objects */
      list_for_each_entry(struct anv_device_memory, mem,
                          &cmd_buffer->device->memory_objects, link) {
         result = anv_execbuf_add_bo(execbuf, mem->bo, NULL, 0,
                                     &cmd_buffer->device->alloc);
         if (result != VK_SUCCESS)
            return result;
      }

      struct anv_block_pool *pool;
      pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
                                     &cmd_buffer->device->alloc);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &cmd_buffer->device->instruction_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
                                     &cmd_buffer->device->alloc);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &cmd_buffer->device->binding_table_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
                                     &cmd_buffer->device->alloc);
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
       * will get added automatically by processing relocations on the batch
       * buffer.  We have to add the surface state BO manually because it has
       * relocations of its own that we need to be sure are processed.
       */
1435      result = anv_execbuf_add_bo(execbuf, ss_pool->block_pool.bo,
1436                                  &cmd_buffer->surface_relocs, 0,
1437                                  &cmd_buffer->device->alloc);
1438      if (result != VK_SUCCESS)
1439         return result;
1440   }
1441
1442   /* First, we walk over all of the bos we've seen and add them and their
1443    * relocations to the validate list.
1444    */
1445   struct anv_batch_bo **bbo;
1446   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1447      adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
1448                                       cmd_buffer->last_ss_pool_center);
1449
1450      result = anv_execbuf_add_bo(execbuf, &(*bbo)->bo, &(*bbo)->relocs, 0,
1451                                  &cmd_buffer->device->alloc);
1452      if (result != VK_SUCCESS)
1453         return result;
1454   }
1455
1456   /* Now that we've adjusted all of the surface state relocations, we need to
1457    * record the surface state pool center so future executions of the command
1458    * buffer can adjust correctly.
1459    */
1460   cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1461
1462   struct anv_batch_bo *first_batch_bo =
1463      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1464
1465   /* The kernel requires that the last entry in the validation list be the
1466    * batch buffer to execute.  We can simply swap the element
1467    * corresponding to the first batch_bo in the chain with the last
1468    * element in the list.
1469    */
1470   if (first_batch_bo->bo.index != execbuf->bo_count - 1) {
1471      uint32_t idx = first_batch_bo->bo.index;
1472      uint32_t last_idx = execbuf->bo_count - 1;
1473
1474      struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1475      assert(execbuf->bos[idx] == &first_batch_bo->bo);
1476
1477      execbuf->objects[idx] = execbuf->objects[last_idx];
1478      execbuf->bos[idx] = execbuf->bos[last_idx];
1479      execbuf->bos[idx]->index = idx;
1480
1481      execbuf->objects[last_idx] = tmp_obj;
1482      execbuf->bos[last_idx] = &first_batch_bo->bo;
1483      first_batch_bo->bo.index = last_idx;
1484   }
1485
1486   /* If we are pinning our BOs, we shouldn't have to relocate anything */
1487   if (cmd_buffer->device->instance->physicalDevice.use_softpin)
1488      assert(!execbuf->has_relocs);
1489
1490   /* Now we go through and fixup all of the relocation lists to point to
1491    * the correct indices in the object array.  We have to do this after we
1492    * reorder the list above as some of the indices may have changed.
1493    */
1494   if (execbuf->has_relocs) {
1495      u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1496         anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1497
1498      anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1499   }
1500
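   /* On platforms without a coherent LLC, CPU writes to the batch may still
    * be sitting in the CPU cache.  Fence, then flush every cacheline of each
    * batch BO so the GPU is guaranteed to see the final contents.
    */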
   if (!cmd_buffer->device->info.has_llc) {
      __builtin_ia32_mfence();
      u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
         for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
            __builtin_ia32_clflush((*bbo)->bo.map + i);
      }
   }

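   /* I915_EXEC_HANDLE_LUT tells the kernel that relocation entries name
    * their targets by index into this buffer list rather than by GEM handle,
    * matching the index fixup done by anv_cmd_buffer_process_relocs above.
    */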
   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
      /* If we were able to successfully relocate everything, tell the kernel
       * that it can skip doing relocations.  The requirements for using
       * NO_RELOC are:
       *
       *  1) The addresses written in the objects must match the corresponding
       *     reloc.presumed_offset which in turn must match the corresponding
       *     execobject.offset.
       *
       *  2) To avoid stalling, execobject.offset should match the current
       *     address of that object within the active context.
       *
       * In order to satisfy all of the invariants that make userspace
       * relocations safe (see relocate_cmd_buffer()), we need to further
       * ensure that the addresses we use match those used by the kernel for
       * the most recent execbuf2.
       *
       * The kernel may still choose to do relocations anyway if something has
       * moved in the GTT. In this case, the relocation list still needs to be
       * valid.  All relocations on the batch buffers are already valid and
       * kept up-to-date.  For surface state relocations, by applying the
       * relocations in relocate_cmd_buffer, we ensured that the address in
       * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
       * safe for the kernel to relocate them as needed.
       */
      execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
   } else {
      /* In the case where we fall back to doing kernel relocations, we need
       * to ensure that the relocation list is valid.  All relocations on the
       * batch buffers are already valid and kept up-to-date.  Since surface
       * states are shared between command buffers and we don't know what
       * order they will be submitted to the kernel, we don't know what
       * address is actually written in the surface state object at any given
       * time.  The only option is to set a bogus presumed offset and let the
       * kernel relocate them.
       */
      for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
         cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
   }

   return VK_SUCCESS;
}

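/* Build an execbuf around the device's trivial batch, which contains just
 * MI_BATCH_BUFFER_END plus a NOOP.  This is used when a submit carries no
 * command buffer but still needs to signal fences or semaphores.
 */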
static VkResult
setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
{
   VkResult result = anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo,
                                        NULL, 0, &device->alloc);
   if (result != VK_SUCCESS)
      return result;

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   return VK_SUCCESS;
}

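/* Submit a command buffer (or the trivial batch when cmd_buffer is NULL) to
 * the kernel.  This wires the wait semaphores, signal semaphores, and fence
 * into the execbuf, performs the ioctl, and then does the post-submit
 * bookkeeping: resetting temporary semaphore state, marking BO fences as
 * submitted, and handing the out sync file to any sync-file semaphores.
 */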
VkResult
anv_cmd_buffer_execbuf(struct anv_device *device,
                       struct anv_cmd_buffer *cmd_buffer,
                       const VkSemaphore *in_semaphores,
                       uint32_t num_in_semaphores,
                       const VkSemaphore *out_semaphores,
                       uint32_t num_out_semaphores,
                       VkFence _fence)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   struct anv_execbuf execbuf;
   anv_execbuf_init(&execbuf);

   int in_fence = -1;
   VkResult result = VK_SUCCESS;
   for (uint32_t i = 0; i < num_in_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
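         /* BO semaphores rely on the kernel's implicit synchronization: by
          * putting the semaphore BO in our validation list, this submission
          * is ordered after any prior submission that wrote it.
          */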
         result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
                                     0, &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
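         /* execbuf2 accepts only a single in-fence fd, so when waiting on
          * more than one sync file we merge them into one fd and wait on
          * that instead.
          */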
         if (in_fence == -1) {
            in_fence = impl->fd;
         } else {
            int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
            if (merge == -1)
               return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

            close(impl->fd);
            close(in_fence);
            in_fence = merge;
         }

         impl->fd = -1;
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
                                          I915_EXEC_FENCE_WAIT,
                                          &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      default:
         break;
      }
   }

   bool need_out_fence = false;
   for (uint32_t i = 0; i < num_out_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);

      /* Under most circumstances, out semaphores won't be temporary.
       * However, the spec does allow it for opaque_fd.  From the Vulkan
       * 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported semaphores so it appears they are allowed.
       * There are also CTS tests that require this to work.
       */
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
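         /* Signal by listing the BO with EXEC_OBJECT_WRITE so that this
          * submission shows up as a writer for implicit synchronization.
          */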
         result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
                                     EXEC_OBJECT_WRITE, &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         need_out_fence = true;
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
                                          I915_EXEC_FENCE_SIGNAL,
                                          &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      default:
         break;
      }
   }

   if (fence) {
      /* Under most circumstances, out fences won't be temporary.  However,
       * the spec does allow it for opaque_fd.  The Vulkan 1.0.53 spec says
       * nothing whatsoever about signal operations on temporarily imported
       * fences, so it appears they are allowed.  There are also CTS tests
       * that require this to work.
       */
      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         result = anv_execbuf_add_bo(&execbuf, &impl->bo.bo, NULL,
                                     EXEC_OBJECT_WRITE, &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
                                          I915_EXEC_FENCE_SIGNAL,
                                          &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   if (cmd_buffer) {
      if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
         struct anv_batch_bo **bo = u_vector_head(&cmd_buffer->seen_bbos);

         device->cmd_buffer_being_decoded = cmd_buffer;
         gen_print_batch(&device->decoder_ctx, (*bo)->bo.map,
                         (*bo)->bo.size, (*bo)->bo.offset, false);
         device->cmd_buffer_being_decoded = NULL;
      }

      result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
   } else {
      result = setup_empty_execbuf(&execbuf, device);
   }

   if (result != VK_SUCCESS)
      return result;

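   /* drm_i915_gem_execbuffer2 has no dedicated fence-array fields; with
    * I915_EXEC_FENCE_ARRAY the kernel reuses cliprects_ptr and num_cliprects
    * for the syncobj fence array.
    */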
   if (execbuf.fence_count > 0) {
      assert(device->instance->physicalDevice.has_syncobj);
      execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.execbuf.num_cliprects = execbuf.fence_count;
      execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
   }

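   /* With I915_EXEC_FENCE_IN, the kernel waits on the sync file fd passed in
    * the low 32 bits of rsvd2 before executing the batch.
    */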
   if (in_fence != -1) {
      execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
      execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
   }

   if (need_out_fence)
      execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;

   result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);

   /* Execbuf does not consume the in_fence.  It's our job to close it. */
   if (in_fence != -1)
      close(in_fence);

   for (uint32_t i = 0; i < num_in_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
      /* From the Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * This has to happen after the execbuf in case we close any syncobjs in
       * the process.
       */
      anv_semaphore_reset_temporary(device, semaphore);
   }

   if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
      /* BO fences can't be shared, so they can't be temporary. */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);

      /* Once the execbuf has returned, we need to set the fence state to
       * SUBMITTED.  We can't do this before calling execbuf because
       * anv_GetFenceStatus does not take the global device lock before
       * checking fence->state.
       *
       * We set the fence state to SUBMITTED regardless of whether or not the
       * execbuf succeeds because we need to ensure that vkWaitForFences() and
       * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
       * VK_SUCCESS) in a finite amount of time even if execbuf fails.
       */
      fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
   }

   if (result == VK_SUCCESS && need_out_fence) {
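      /* With I915_EXEC_FENCE_OUT, the kernel returns a new sync file fd in
       * the high 32 bits of rsvd2.
       */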
      int out_fence = execbuf.execbuf.rsvd2 >> 32;
      for (uint32_t i = 0; i < num_out_semaphores; i++) {
         ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
         /* Out semaphores can't have temporary state because that would
          * imply that we imported a sync file and are trying to signal it.
          */
         assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
         struct anv_semaphore_impl *impl = &semaphore->permanent;

         if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
            assert(impl->fd == -1);
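            /* Each sync-file semaphore keeps its own dup of the out fence;
             * the original fd is closed below.
             */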
            impl->fd = dup(out_fence);
         }
      }
      close(out_fence);
   }

   anv_execbuf_finish(&execbuf, &device->alloc);

   return result;
}
