1/*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef ANV_PRIVATE_H
25#define ANV_PRIVATE_H
26
27#include <stdlib.h>
28#include <stdio.h>
29#include <stdbool.h>
30#include <pthread.h>
31#include <assert.h>
32#include <stdint.h>
33#include "drm-uapi/i915_drm.h"
34
35#ifdef HAVE_VALGRIND
36#include <valgrind.h>
37#include <memcheck.h>
38#define VG(x) x
39#ifndef NDEBUG
40#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
41#endif
42#else
43#define VG(x)
44#endif
45
46#include "common/gen_clflush.h"
47#include "common/gen_decoder.h"
48#include "common/gen_gem.h"
49#include "dev/gen_device_info.h"
50#include "blorp/blorp.h"
51#include "compiler/brw_compiler.h"
52#include "util/macros.h"
53#include "util/hash_table.h"
54#include "util/list.h"
55#include "util/set.h"
56#include "util/u_atomic.h"
57#include "util/u_vector.h"
58#include "util/u_math.h"
59#include "util/vma.h"
60#include "util/xmlconfig.h"
61#include "vk_alloc.h"
62#include "vk_debug_report.h"
63
64/* Pre-declarations needed for WSI entrypoints */
65struct wl_surface;
66struct wl_display;
67typedef struct xcb_connection_t xcb_connection_t;
68typedef uint32_t xcb_visualid_t;
69typedef uint32_t xcb_window_t;
70
71struct anv_buffer;
72struct anv_buffer_view;
73struct anv_image_view;
74struct anv_instance;
75
76struct gen_l3_config;
77
78#include <vulkan/vulkan.h>
79#include <vulkan/vulkan_intel.h>
80#include <vulkan/vk_icd.h>
81
82#include "anv_android.h"
83#include "anv_entrypoints.h"
84#include "anv_extensions.h"
85#include "isl/isl.h"
86
87#include "dev/gen_debug.h"
88#include "common/intel_log.h"
89#include "wsi_common.h"
90
91/* anv Virtual Memory Layout
92 * =========================
93 *
94 * When the anv driver is determining the virtual graphics addresses of memory
95 * objects itself using the softpin mechanism, the following memory ranges
96 * will be used.
97 *
98 * Three special considerations to notice:
99 *
100 * (1) the dynamic state pool is located within the same 4 GiB as the low
101 * heap. This is to work around a VF cache issue described in a comment in
102 * anv_physical_device_init_heaps.
103 *
104 * (2) the binding table pool is located at lower addresses than the surface
105 * state pool, within a 4 GiB range. This allows surface state base addresses
106 * to cover both binding tables (16 bit offsets) and surface states (32 bit
107 * offsets).
108 *
109 * (3) the last 4 GiB of the address space is withheld from the high
110 * heap. Various hardware units will read past the end of an object for
111 * various reasons. This healthy margin prevents reads from wrapping around
112 * 48-bit addresses.
113 */
114#define LOW_HEAP_MIN_ADDRESS               0x000000001000ULL /* 4 KiB */
115#define LOW_HEAP_MAX_ADDRESS               0x0000bfffffffULL
116#define DYNAMIC_STATE_POOL_MIN_ADDRESS     0x0000c0000000ULL /* 3 GiB */
117#define DYNAMIC_STATE_POOL_MAX_ADDRESS     0x0000ffffffffULL
118#define BINDING_TABLE_POOL_MIN_ADDRESS     0x000100000000ULL /* 4 GiB */
119#define BINDING_TABLE_POOL_MAX_ADDRESS     0x00013fffffffULL
120#define SURFACE_STATE_POOL_MIN_ADDRESS     0x000140000000ULL /* 5 GiB */
121#define SURFACE_STATE_POOL_MAX_ADDRESS     0x00017fffffffULL
122#define INSTRUCTION_STATE_POOL_MIN_ADDRESS 0x000180000000ULL /* 6 GiB */
123#define INSTRUCTION_STATE_POOL_MAX_ADDRESS 0x0001bfffffffULL
124#define HIGH_HEAP_MIN_ADDRESS              0x0001c0000000ULL /* 7 GiB */
125
126#define LOW_HEAP_SIZE               \
127   (LOW_HEAP_MAX_ADDRESS - LOW_HEAP_MIN_ADDRESS + 1)
128#define DYNAMIC_STATE_POOL_SIZE     \
129   (DYNAMIC_STATE_POOL_MAX_ADDRESS - DYNAMIC_STATE_POOL_MIN_ADDRESS + 1)
130#define BINDING_TABLE_POOL_SIZE     \
131   (BINDING_TABLE_POOL_MAX_ADDRESS - BINDING_TABLE_POOL_MIN_ADDRESS + 1)
132#define SURFACE_STATE_POOL_SIZE     \
133   (SURFACE_STATE_POOL_MAX_ADDRESS - SURFACE_STATE_POOL_MIN_ADDRESS + 1)
134#define INSTRUCTION_STATE_POOL_SIZE \
135   (INSTRUCTION_STATE_POOL_MAX_ADDRESS - INSTRUCTION_STATE_POOL_MIN_ADDRESS + 1)
136
137/* Allowing different clear colors requires us to perform a depth resolve at
138 * the end of certain render passes. This is because while slow clears store
139 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
140 * See the PRMs for examples describing when additional resolves would be
141 * necessary. To enable fast clears without requiring extra resolves, we set
142 * the clear value to a globally-defined one. We could allow different values
143 * if the user doesn't expect coherent data during or after a render passes
144 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
145 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
146 * 1.0f seems to be the only value used. The only application that doesn't set
147 * this value does so through the usage of an seemingly uninitialized clear
148 * value.
149 */
150#define ANV_HZ_FC_VAL 1.0f
151
152#define MAX_VBS         28
153#define MAX_XFB_BUFFERS  4
154#define MAX_XFB_STREAMS  4
155#define MAX_SETS         8
156#define MAX_RTS          8
157#define MAX_VIEWPORTS   16
158#define MAX_SCISSORS    16
159#define MAX_PUSH_CONSTANTS_SIZE 128
160#define MAX_DYNAMIC_BUFFERS 16
161#define MAX_IMAGES 64
162#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */
163#define MAX_INLINE_UNIFORM_BLOCK_SIZE 4096
164#define MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS 32
165
166/* From the Skylake PRM Vol. 7 "Binding Table Surface State Model":
167 *
168 *    "The surface state model is used when a Binding Table Index (specified
169 *    in the message descriptor) of less than 240 is specified. In this model,
170 *    the Binding Table Index is used to index into the binding table, and the
171 *    binding table entry contains a pointer to the SURFACE_STATE."
172 *
173 * Binding table values above 240 are used for various things in the hardware
174 * such as stateless, stateless with incoherent cache, SLM, and bindless.
175 */
176#define MAX_BINDING_TABLE_SIZE 240
177
178/* The kernel relocation API has a limitation of a 32-bit delta value
179 * applied to the address before it is written which, in spite of it being
180 * unsigned, is treated as signed .  Because of the way that this maps to
181 * the Vulkan API, we cannot handle an offset into a buffer that does not
182 * fit into a signed 32 bits.  The only mechanism we have for dealing with
183 * this at the moment is to limit all VkDeviceMemory objects to a maximum
184 * of 2GB each.  The Vulkan spec allows us to do this:
185 *
186 *    "Some platforms may have a limit on the maximum size of a single
187 *    allocation. For example, certain systems may fail to create
188 *    allocations with a size greater than or equal to 4GB. Such a limit is
189 *    implementation-dependent, and if such a failure occurs then the error
190 *    VK_ERROR_OUT_OF_DEVICE_MEMORY should be returned."
191 *
192 * We don't use vk_error here because it's not an error so much as an
193 * indication to the application that the allocation is too large.
194 */
195#define MAX_MEMORY_ALLOCATION_SIZE (1ull << 31)
196
197#define ANV_SVGS_VB_INDEX    MAX_VBS
198#define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)
199
200/* We reserve this MI ALU register for the purpose of handling predication.
201 * Other code which uses the MI ALU should leave it alone.
202 */
203#define ANV_PREDICATE_RESULT_REG 0x2678 /* MI_ALU_REG15 */
204
205#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
206
207static inline uint32_t
208align_down_npot_u32(uint32_t v, uint32_t a)
209{
210   return v - (v % a);
211}
212
213static inline uint32_t
214align_u32(uint32_t v, uint32_t a)
215{
216   assert(a != 0 && a == (a & -a));
217   return (v + a - 1) & ~(a - 1);
218}
219
220static inline uint64_t
221align_u64(uint64_t v, uint64_t a)
222{
223   assert(a != 0 && a == (a & -a));
224   return (v + a - 1) & ~(a - 1);
225}
226
227static inline int32_t
228align_i32(int32_t v, int32_t a)
229{
230   assert(a != 0 && a == (a & -a));
231   return (v + a - 1) & ~(a - 1);
232}
233
234/** Alignment must be a power of 2. */
235static inline bool
236anv_is_aligned(uintmax_t n, uintmax_t a)
237{
238   assert(a == (a & -a));
239   return (n & (a - 1)) == 0;
240}
241
242static inline uint32_t
243anv_minify(uint32_t n, uint32_t levels)
244{
245   if (unlikely(n == 0))
246      return 0;
247   else
248      return MAX2(n >> levels, 1);
249}
250
251static inline float
252anv_clamp_f(float f, float min, float max)
253{
254   assert(min < max);
255
256   if (f > max)
257      return max;
258   else if (f < min)
259      return min;
260   else
261      return f;
262}
263
264static inline bool
265anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
266{
267   if (*inout_mask & clear_mask) {
268      *inout_mask &= ~clear_mask;
269      return true;
270   } else {
271      return false;
272   }
273}
274
275static inline union isl_color_value
276vk_to_isl_color(VkClearColorValue color)
277{
278   return (union isl_color_value) {
279      .u32 = {
280         color.uint32[0],
281         color.uint32[1],
282         color.uint32[2],
283         color.uint32[3],
284      },
285   };
286}
287
288#define for_each_bit(b, dword)                          \
289   for (uint32_t __dword = (dword);                     \
290        (b) = __builtin_ffs(__dword) - 1, __dword;      \
291        __dword &= ~(1 << (b)))
292
293#define typed_memcpy(dest, src, count) ({ \
294   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
295   memcpy((dest), (src), (count) * sizeof(*(src))); \
296})
297
298/* Mapping from anv object to VkDebugReportObjectTypeEXT. New types need
299 * to be added here in order to utilize mapping in debug/error/perf macros.
300 */
301#define REPORT_OBJECT_TYPE(o)                                                      \
302   __builtin_choose_expr (                                                         \
303   __builtin_types_compatible_p (__typeof (o), struct anv_instance*),              \
304   VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,                                       \
305   __builtin_choose_expr (                                                         \
306   __builtin_types_compatible_p (__typeof (o), struct anv_physical_device*),       \
307   VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,                                \
308   __builtin_choose_expr (                                                         \
309   __builtin_types_compatible_p (__typeof (o), struct anv_device*),                \
310   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,                                         \
311   __builtin_choose_expr (                                                         \
312   __builtin_types_compatible_p (__typeof (o), const struct anv_device*),          \
313   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,                                         \
314   __builtin_choose_expr (                                                         \
315   __builtin_types_compatible_p (__typeof (o), struct anv_queue*),                 \
316   VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,                                          \
317   __builtin_choose_expr (                                                         \
318   __builtin_types_compatible_p (__typeof (o), struct anv_semaphore*),             \
319   VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,                                      \
320   __builtin_choose_expr (                                                         \
321   __builtin_types_compatible_p (__typeof (o), struct anv_cmd_buffer*),            \
322   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,                                 \
323   __builtin_choose_expr (                                                         \
324   __builtin_types_compatible_p (__typeof (o), struct anv_fence*),                 \
325   VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,                                          \
326   __builtin_choose_expr (                                                         \
327   __builtin_types_compatible_p (__typeof (o), struct anv_device_memory*),         \
328   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,                                  \
329   __builtin_choose_expr (                                                         \
330   __builtin_types_compatible_p (__typeof (o), struct anv_buffer*),                \
331   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,                                         \
332   __builtin_choose_expr (                                                         \
333   __builtin_types_compatible_p (__typeof (o), struct anv_image*),                 \
334   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,                                          \
335   __builtin_choose_expr (                                                         \
336   __builtin_types_compatible_p (__typeof (o), const struct anv_image*),           \
337   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,                                          \
338   __builtin_choose_expr (                                                         \
339   __builtin_types_compatible_p (__typeof (o), struct anv_event*),                 \
340   VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,                                          \
341   __builtin_choose_expr (                                                         \
342   __builtin_types_compatible_p (__typeof (o), struct anv_query_pool*),            \
343   VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,                                     \
344   __builtin_choose_expr (                                                         \
345   __builtin_types_compatible_p (__typeof (o), struct anv_buffer_view*),           \
346   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,                                    \
347   __builtin_choose_expr (                                                         \
348   __builtin_types_compatible_p (__typeof (o), struct anv_image_view*),            \
349   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,                                     \
350   __builtin_choose_expr (                                                         \
351   __builtin_types_compatible_p (__typeof (o), struct anv_shader_module*),         \
352   VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,                                  \
353   __builtin_choose_expr (                                                         \
354   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_cache*),        \
355   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,                                 \
356   __builtin_choose_expr (                                                         \
357   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_layout*),       \
358   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,                                \
359   __builtin_choose_expr (                                                         \
360   __builtin_types_compatible_p (__typeof (o), struct anv_render_pass*),           \
361   VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,                                    \
362   __builtin_choose_expr (                                                         \
363   __builtin_types_compatible_p (__typeof (o), struct anv_pipeline*),              \
364   VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,                                       \
365   __builtin_choose_expr (                                                         \
366   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set_layout*), \
367   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,                          \
368   __builtin_choose_expr (                                                         \
369   __builtin_types_compatible_p (__typeof (o), struct anv_sampler*),               \
370   VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,                                        \
371   __builtin_choose_expr (                                                         \
372   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_pool*),       \
373   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,                                \
374   __builtin_choose_expr (                                                         \
375   __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set*),        \
376   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,                                 \
377   __builtin_choose_expr (                                                         \
378   __builtin_types_compatible_p (__typeof (o), struct anv_framebuffer*),           \
379   VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,                                    \
380   __builtin_choose_expr (                                                         \
381   __builtin_types_compatible_p (__typeof (o), struct anv_cmd_pool*),              \
382   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,                                   \
383   __builtin_choose_expr (                                                         \
384   __builtin_types_compatible_p (__typeof (o), struct anv_surface*),               \
385   VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT,                                    \
386   __builtin_choose_expr (                                                         \
387   __builtin_types_compatible_p (__typeof (o), struct wsi_swapchain*),             \
388   VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,                                  \
389   __builtin_choose_expr (                                                         \
390   __builtin_types_compatible_p (__typeof (o), struct vk_debug_callback*),         \
391   VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT,                      \
392   __builtin_choose_expr (                                                         \
393   __builtin_types_compatible_p (__typeof (o), void*),                             \
394   VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,                                        \
395   /* The void expression results in a compile-time error                          \
396      when assigning the result to something.  */                                  \
397   (void)0)))))))))))))))))))))))))))))))
398
399/* Whenever we generate an error, pass it through this function. Useful for
400 * debugging, where we can break on it. Only call at error site, not when
401 * propagating errors. Might be useful to plug in a stack trace here.
402 */
403
404VkResult __vk_errorv(struct anv_instance *instance, const void *object,
405                     VkDebugReportObjectTypeEXT type, VkResult error,
406                     const char *file, int line, const char *format,
407                     va_list args);
408
409VkResult __vk_errorf(struct anv_instance *instance, const void *object,
410                     VkDebugReportObjectTypeEXT type, VkResult error,
411                     const char *file, int line, const char *format, ...);
412
413#ifdef DEBUG
414#define vk_error(error) __vk_errorf(NULL, NULL,\
415                                    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,\
416                                    error, __FILE__, __LINE__, NULL)
417#define vk_errorv(instance, obj, error, format, args)\
418    __vk_errorv(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
419                __FILE__, __LINE__, format, args)
420#define vk_errorf(instance, obj, error, format, ...)\
421    __vk_errorf(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
422                __FILE__, __LINE__, format, ## __VA_ARGS__)
423#else
424#define vk_error(error) error
425#define vk_errorf(instance, obj, error, format, ...) error
426#endif
427
428/**
429 * Warn on ignored extension structs.
430 *
431 * The Vulkan spec requires us to ignore unsupported or unknown structs in
432 * a pNext chain.  In debug mode, emitting warnings for ignored structs may
433 * help us discover structs that we should not have ignored.
434 *
435 *
436 * From the Vulkan 1.0.38 spec:
437 *
438 *    Any component of the implementation (the loader, any enabled layers,
439 *    and drivers) must skip over, without processing (other than reading the
440 *    sType and pNext members) any chained structures with sType values not
441 *    defined by extensions supported by that component.
442 */
443#define anv_debug_ignored_stype(sType) \
444   intel_logd("%s: ignored VkStructureType %u\n", __func__, (sType))
445
446void __anv_perf_warn(struct anv_instance *instance, const void *object,
447                     VkDebugReportObjectTypeEXT type, const char *file,
448                     int line, const char *format, ...)
449   anv_printflike(6, 7);
450void anv_loge(const char *format, ...) anv_printflike(1, 2);
451void anv_loge_v(const char *format, va_list va);
452
453/**
454 * Print a FINISHME message, including its source location.
455 */
456#define anv_finishme(format, ...) \
457   do { \
458      static bool reported = false; \
459      if (!reported) { \
460         intel_logw("%s:%d: FINISHME: " format, __FILE__, __LINE__, \
461                    ##__VA_ARGS__); \
462         reported = true; \
463      } \
464   } while (0)
465
466/**
467 * Print a perf warning message.  Set INTEL_DEBUG=perf to see these.
468 */
469#define anv_perf_warn(instance, obj, format, ...) \
470   do { \
471      static bool reported = false; \
472      if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
473         __anv_perf_warn(instance, obj, REPORT_OBJECT_TYPE(obj), __FILE__, __LINE__,\
474                         format, ##__VA_ARGS__); \
475         reported = true; \
476      } \
477   } while (0)
478
479/* A non-fatal assert.  Useful for debugging. */
480#ifdef DEBUG
481#define anv_assert(x) ({ \
482   if (unlikely(!(x))) \
483      intel_loge("%s:%d ASSERT: %s", __FILE__, __LINE__, #x); \
484})
485#else
486#define anv_assert(x)
487#endif
488
489/* A multi-pointer allocator
490 *
491 * When copying data structures from the user (such as a render pass), it's
492 * common to need to allocate data for a bunch of different things.  Instead
493 * of doing several allocations and having to handle all of the error checking
494 * that entails, it can be easier to do a single allocation.  This struct
495 * helps facilitate that.  The intended usage looks like this:
496 *
497 *    ANV_MULTIALLOC(ma)
498 *    anv_multialloc_add(&ma, &main_ptr, 1);
499 *    anv_multialloc_add(&ma, &substruct1, substruct1Count);
500 *    anv_multialloc_add(&ma, &substruct2, substruct2Count);
501 *
502 *    if (!anv_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
503 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEORY);
504 */
505struct anv_multialloc {
506    size_t size;
507    size_t align;
508
509    uint32_t ptr_count;
510    void **ptrs[8];
511};
512
513#define ANV_MULTIALLOC_INIT \
514   ((struct anv_multialloc) { 0, })
515
516#define ANV_MULTIALLOC(_name) \
517   struct anv_multialloc _name = ANV_MULTIALLOC_INIT
518
519__attribute__((always_inline))
520static inline void
521_anv_multialloc_add(struct anv_multialloc *ma,
522                    void **ptr, size_t size, size_t align)
523{
524   size_t offset = align_u64(ma->size, align);
525   ma->size = offset + size;
526   ma->align = MAX2(ma->align, align);
527
528   /* Store the offset in the pointer. */
529   *ptr = (void *)(uintptr_t)offset;
530
531   assert(ma->ptr_count < ARRAY_SIZE(ma->ptrs));
532   ma->ptrs[ma->ptr_count++] = ptr;
533}
534
535#define anv_multialloc_add_size(_ma, _ptr, _size) \
536   _anv_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))
537
538#define anv_multialloc_add(_ma, _ptr, _count) \
539   anv_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)));
540
541__attribute__((always_inline))
542static inline void *
543anv_multialloc_alloc(struct anv_multialloc *ma,
544                     const VkAllocationCallbacks *alloc,
545                     VkSystemAllocationScope scope)
546{
547   void *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
548   if (!ptr)
549      return NULL;
550
551   /* Fill out each of the pointers with their final value.
552    *
553    *   for (uint32_t i = 0; i < ma->ptr_count; i++)
554    *      *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
555    *
556    * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
557    * constant, GCC is incapable of figuring this out and unrolling the loop
558    * so we have to give it a little help.
559    */
560   STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 8);
561#define _ANV_MULTIALLOC_UPDATE_POINTER(_i) \
562   if ((_i) < ma->ptr_count) \
563      *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
564   _ANV_MULTIALLOC_UPDATE_POINTER(0);
565   _ANV_MULTIALLOC_UPDATE_POINTER(1);
566   _ANV_MULTIALLOC_UPDATE_POINTER(2);
567   _ANV_MULTIALLOC_UPDATE_POINTER(3);
568   _ANV_MULTIALLOC_UPDATE_POINTER(4);
569   _ANV_MULTIALLOC_UPDATE_POINTER(5);
570   _ANV_MULTIALLOC_UPDATE_POINTER(6);
571   _ANV_MULTIALLOC_UPDATE_POINTER(7);
572#undef _ANV_MULTIALLOC_UPDATE_POINTER
573
574   return ptr;
575}
576
577__attribute__((always_inline))
578static inline void *
579anv_multialloc_alloc2(struct anv_multialloc *ma,
580                      const VkAllocationCallbacks *parent_alloc,
581                      const VkAllocationCallbacks *alloc,
582                      VkSystemAllocationScope scope)
583{
584   return anv_multialloc_alloc(ma, alloc ? alloc : parent_alloc, scope);
585}
586
587/* Extra ANV-defined BO flags which won't be passed to the kernel */
588#define ANV_BO_EXTERNAL    (1ull << 31)
589#define ANV_BO_FLAG_MASK   (1ull << 31)
590
591struct anv_bo {
592   uint32_t gem_handle;
593
594   /* Index into the current validation list.  This is used by the
595    * validation list building alrogithm to track which buffers are already
596    * in the validation list so that we can ensure uniqueness.
597    */
598   uint32_t index;
599
600   /* Last known offset.  This value is provided by the kernel when we
601    * execbuf and is used as the presumed offset for the next bunch of
602    * relocations.
603    */
604   uint64_t offset;
605
606   uint64_t size;
607   void *map;
608
609   /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
610   uint32_t flags;
611};
612
613static inline void
614anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
615{
616   bo->gem_handle = gem_handle;
617   bo->index = 0;
618   bo->offset = -1;
619   bo->size = size;
620   bo->map = NULL;
621   bo->flags = 0;
622}
623
624/* Represents a lock-free linked list of "free" things.  This is used by
625 * both the block pool and the state pools.  Unfortunately, in order to
626 * solve the ABA problem, we can't use a single uint32_t head.
627 */
628union anv_free_list {
629   struct {
630      uint32_t offset;
631
632      /* A simple count that is incremented every time the head changes. */
633      uint32_t count;
634   };
635   uint64_t u64;
636};
637
638#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { UINT32_MAX, 0 } })
639
640struct anv_block_state {
641   union {
642      struct {
643         uint32_t next;
644         uint32_t end;
645      };
646      uint64_t u64;
647   };
648};
649
650#define anv_block_pool_foreach_bo(bo, pool)  \
651   for (bo = (pool)->bos; bo != &(pool)->bos[(pool)->nbos]; bo++)
652
653#define ANV_MAX_BLOCK_POOL_BOS 20
654
655struct anv_block_pool {
656   struct anv_device *device;
657
658   uint64_t bo_flags;
659
660   struct anv_bo bos[ANV_MAX_BLOCK_POOL_BOS];
661   struct anv_bo *bo;
662   uint32_t nbos;
663
664   uint64_t size;
665
666   /* The address where the start of the pool is pinned. The various bos that
667    * are created as the pool grows will have addresses in the range
668    * [start_address, start_address + BLOCK_POOL_MEMFD_SIZE).
669    */
670   uint64_t start_address;
671
672   /* The offset from the start of the bo to the "center" of the block
673    * pool.  Pointers to allocated blocks are given by
674    * bo.map + center_bo_offset + offsets.
675    */
676   uint32_t center_bo_offset;
677
678   /* Current memory map of the block pool.  This pointer may or may not
679    * point to the actual beginning of the block pool memory.  If
680    * anv_block_pool_alloc_back has ever been called, then this pointer
681    * will point to the "center" position of the buffer and all offsets
682    * (negative or positive) given out by the block pool alloc functions
683    * will be valid relative to this pointer.
684    *
685    * In particular, map == bo.map + center_offset
686    *
687    * DO NOT access this pointer directly. Use anv_block_pool_map() instead,
688    * since it will handle the softpin case as well, where this points to NULL.
689    */
690   void *map;
691   int fd;
692
693   /**
694    * Array of mmaps and gem handles owned by the block pool, reclaimed when
695    * the block pool is destroyed.
696    */
697   struct u_vector mmap_cleanups;
698
699   struct anv_block_state state;
700
701   struct anv_block_state back_state;
702};
703
704/* Block pools are backed by a fixed-size 1GB memfd */
705#define BLOCK_POOL_MEMFD_SIZE (1ul << 30)
706
707/* The center of the block pool is also the middle of the memfd.  This may
708 * change in the future if we decide differently for some reason.
709 */
710#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
711
712static inline uint32_t
713anv_block_pool_size(struct anv_block_pool *pool)
714{
715   return pool->state.end + pool->back_state.end;
716}
717
718struct anv_state {
719   int32_t offset;
720   uint32_t alloc_size;
721   void *map;
722   uint32_t idx;
723};
724
725#define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })
726
727struct anv_fixed_size_state_pool {
728   union anv_free_list free_list;
729   struct anv_block_state block;
730};
731
732#define ANV_MIN_STATE_SIZE_LOG2 6
733#define ANV_MAX_STATE_SIZE_LOG2 20
734
735#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
736
737struct anv_free_entry {
738   uint32_t next;
739   struct anv_state state;
740};
741
742struct anv_state_table {
743   struct anv_device *device;
744   int fd;
745   struct anv_free_entry *map;
746   uint32_t size;
747   struct anv_block_state state;
748   struct u_vector cleanups;
749};
750
751struct anv_state_pool {
752   struct anv_block_pool block_pool;
753
754   struct anv_state_table table;
755
756   /* The size of blocks which will be allocated from the block pool */
757   uint32_t block_size;
758
759   /** Free list for "back" allocations */
760   union anv_free_list back_alloc_free_list;
761
762   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
763};
764
765struct anv_state_stream_block;
766
767struct anv_state_stream {
768   struct anv_state_pool *state_pool;
769
770   /* The size of blocks to allocate from the state pool */
771   uint32_t block_size;
772
773   /* Current block we're allocating from */
774   struct anv_state block;
775
776   /* Offset into the current block at which to allocate the next state */
777   uint32_t next;
778
779   /* List of all blocks allocated from this pool */
780   struct anv_state_stream_block *block_list;
781};
782
783/* The block_pool functions exported for testing only.  The block pool should
784 * only be used via a state pool (see below).
785 */
786VkResult anv_block_pool_init(struct anv_block_pool *pool,
787                             struct anv_device *device,
788                             uint64_t start_address,
789                             uint32_t initial_size,
790                             uint64_t bo_flags);
791void anv_block_pool_finish(struct anv_block_pool *pool);
792int32_t anv_block_pool_alloc(struct anv_block_pool *pool,
793                             uint32_t block_size, uint32_t *padding);
794int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool,
795                                  uint32_t block_size);
796void* anv_block_pool_map(struct anv_block_pool *pool, int32_t offset);
797
798VkResult anv_state_pool_init(struct anv_state_pool *pool,
799                             struct anv_device *device,
800                             uint64_t start_address,
801                             uint32_t block_size,
802                             uint64_t bo_flags);
803void anv_state_pool_finish(struct anv_state_pool *pool);
804struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
805                                      uint32_t state_size, uint32_t alignment);
806struct anv_state anv_state_pool_alloc_back(struct anv_state_pool *pool);
807void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
808void anv_state_stream_init(struct anv_state_stream *stream,
809                           struct anv_state_pool *state_pool,
810                           uint32_t block_size);
811void anv_state_stream_finish(struct anv_state_stream *stream);
812struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
813                                        uint32_t size, uint32_t alignment);
814
815VkResult anv_state_table_init(struct anv_state_table *table,
816                             struct anv_device *device,
817                             uint32_t initial_entries);
818void anv_state_table_finish(struct anv_state_table *table);
819VkResult anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
820                             uint32_t count);
821void anv_free_list_push(union anv_free_list *list,
822                        struct anv_state_table *table,
823                        uint32_t idx, uint32_t count);
824struct anv_state* anv_free_list_pop(union anv_free_list *list,
825                                    struct anv_state_table *table);
826
827
828static inline struct anv_state *
829anv_state_table_get(struct anv_state_table *table, uint32_t idx)
830{
831   return &table->map[idx].state;
832}
833/**
834 * Implements a pool of re-usable BOs.  The interface is identical to that
835 * of block_pool except that each block is its own BO.
836 */
837struct anv_bo_pool {
838   struct anv_device *device;
839
840   uint64_t bo_flags;
841
842   void *free_list[16];
843};
844
845void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
846                      uint64_t bo_flags);
847void anv_bo_pool_finish(struct anv_bo_pool *pool);
848VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
849                           uint32_t size);
850void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
851
852struct anv_scratch_bo {
853   bool exists;
854   struct anv_bo bo;
855};
856
857struct anv_scratch_pool {
858   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
859   struct anv_scratch_bo bos[16][MESA_SHADER_STAGES];
860};
861
862void anv_scratch_pool_init(struct anv_device *device,
863                           struct anv_scratch_pool *pool);
864void anv_scratch_pool_finish(struct anv_device *device,
865                             struct anv_scratch_pool *pool);
866struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
867                                      struct anv_scratch_pool *pool,
868                                      gl_shader_stage stage,
869                                      unsigned per_thread_scratch);
870
871/** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
872struct anv_bo_cache {
873   struct hash_table *bo_map;
874   pthread_mutex_t mutex;
875};
876
877VkResult anv_bo_cache_init(struct anv_bo_cache *cache);
878void anv_bo_cache_finish(struct anv_bo_cache *cache);
879VkResult anv_bo_cache_alloc(struct anv_device *device,
880                            struct anv_bo_cache *cache,
881                            uint64_t size, uint64_t bo_flags,
882                            struct anv_bo **bo);
883VkResult anv_bo_cache_import_host_ptr(struct anv_device *device,
884                                      struct anv_bo_cache *cache,
885                                      void *host_ptr, uint32_t size,
886                                      uint64_t bo_flags, struct anv_bo **bo_out);
887VkResult anv_bo_cache_import(struct anv_device *device,
888                             struct anv_bo_cache *cache,
889                             int fd, uint64_t bo_flags,
890                             struct anv_bo **bo);
891VkResult anv_bo_cache_export(struct anv_device *device,
892                             struct anv_bo_cache *cache,
893                             struct anv_bo *bo_in, int *fd_out);
894void anv_bo_cache_release(struct anv_device *device,
895                          struct anv_bo_cache *cache,
896                          struct anv_bo *bo);
897
898struct anv_memory_type {
899   /* Standard bits passed on to the client */
900   VkMemoryPropertyFlags   propertyFlags;
901   uint32_t                heapIndex;
902
903   /* Driver-internal book-keeping */
904   VkBufferUsageFlags      valid_buffer_usage;
905};
906
907struct anv_memory_heap {
908   /* Standard bits passed on to the client */
909   VkDeviceSize      size;
910   VkMemoryHeapFlags flags;
911
912   /* Driver-internal book-keeping */
913   uint64_t          vma_start;
914   uint64_t          vma_size;
915   bool              supports_48bit_addresses;
916   VkDeviceSize      used;
917};
918
919struct anv_physical_device {
920    VK_LOADER_DATA                              _loader_data;
921
922    struct anv_instance *                       instance;
923    uint32_t                                    chipset_id;
924    bool                                        no_hw;
925    char                                        path[20];
926    const char *                                name;
927    struct {
928       uint16_t                                 domain;
929       uint8_t                                  bus;
930       uint8_t                                  device;
931       uint8_t                                  function;
932    }                                           pci_info;
933    struct gen_device_info                      info;
934    /** Amount of "GPU memory" we want to advertise
935     *
936     * Clearly, this value is bogus since Intel is a UMA architecture.  On
937     * gen7 platforms, we are limited by GTT size unless we want to implement
938     * fine-grained tracking and GTT splitting.  On Broadwell and above we are
939     * practically unlimited.  However, we will never report more than 3/4 of
940     * the total system ram to try and avoid running out of RAM.
941     */
942    bool                                        supports_48bit_addresses;
943    struct brw_compiler *                       compiler;
944    struct isl_device                           isl_dev;
945    int                                         cmd_parser_version;
946    bool                                        has_exec_async;
947    bool                                        has_exec_capture;
948    bool                                        has_exec_fence;
949    bool                                        has_syncobj;
950    bool                                        has_syncobj_wait;
951    bool                                        has_context_priority;
952    bool                                        use_softpin;
953    bool                                        has_context_isolation;
954    bool                                        has_mem_available;
955    bool                                        always_use_bindless;
956
957    /** True if we can access buffers using A64 messages */
958    bool                                        has_a64_buffer_access;
959    /** True if we can use bindless access for images */
960    bool                                        has_bindless_images;
961    /** True if we can use bindless access for samplers */
962    bool                                        has_bindless_samplers;
963
964    struct anv_device_extension_table           supported_extensions;
965
966    uint32_t                                    eu_total;
967    uint32_t                                    subslice_total;
968
969    struct {
970      uint32_t                                  type_count;
971      struct anv_memory_type                    types[VK_MAX_MEMORY_TYPES];
972      uint32_t                                  heap_count;
973      struct anv_memory_heap                    heaps[VK_MAX_MEMORY_HEAPS];
974    } memory;
975
976    uint8_t                                     driver_build_sha1[20];
977    uint8_t                                     pipeline_cache_uuid[VK_UUID_SIZE];
978    uint8_t                                     driver_uuid[VK_UUID_SIZE];
979    uint8_t                                     device_uuid[VK_UUID_SIZE];
980
981    struct disk_cache *                         disk_cache;
982
983    struct wsi_device                       wsi_device;
984    int                                         local_fd;
985    int                                         master_fd;
986};
987
988struct anv_app_info {
989   const char*        app_name;
990   uint32_t           app_version;
991   const char*        engine_name;
992   uint32_t           engine_version;
993   uint32_t           api_version;
994};
995
996struct anv_instance {
997    VK_LOADER_DATA                              _loader_data;
998
999    VkAllocationCallbacks                       alloc;
1000
1001    struct anv_app_info                         app_info;
1002
1003    struct anv_instance_extension_table         enabled_extensions;
1004    struct anv_instance_dispatch_table          dispatch;
1005    struct anv_device_dispatch_table            device_dispatch;
1006
1007    int                                         physicalDeviceCount;
1008    struct anv_physical_device                  physicalDevice;
1009
1010    bool                                        pipeline_cache_enabled;
1011
1012    struct vk_debug_report_instance             debug_report_callbacks;
1013
1014    struct driOptionCache                       dri_options;
1015    struct driOptionCache                       available_dri_options;
1016};
1017
1018VkResult anv_init_wsi(struct anv_physical_device *physical_device);
1019void anv_finish_wsi(struct anv_physical_device *physical_device);
1020
1021uint32_t anv_physical_device_api_version(struct anv_physical_device *dev);
1022bool anv_physical_device_extension_supported(struct anv_physical_device *dev,
1023                                             const char *name);
1024
1025struct anv_queue {
1026    VK_LOADER_DATA                              _loader_data;
1027
1028    struct anv_device *                         device;
1029
1030    VkDeviceQueueCreateFlags                    flags;
1031};
1032
1033struct anv_pipeline_cache {
1034   struct anv_device *                          device;
1035   pthread_mutex_t                              mutex;
1036
1037   struct hash_table *                          nir_cache;
1038
1039   struct hash_table *                          cache;
1040};
1041
1042struct nir_xfb_info;
1043struct anv_pipeline_bind_map;
1044
1045void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
1046                             struct anv_device *device,
1047                             bool cache_enabled);
1048void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
1049
1050struct anv_shader_bin *
1051anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
1052                          const void *key, uint32_t key_size);
1053struct anv_shader_bin *
1054anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
1055                                 const void *key_data, uint32_t key_size,
1056                                 const void *kernel_data, uint32_t kernel_size,
1057                                 const void *constant_data,
1058                                 uint32_t constant_data_size,
1059                                 const struct brw_stage_prog_data *prog_data,
1060                                 uint32_t prog_data_size,
1061                                 const struct nir_xfb_info *xfb_info,
1062                                 const struct anv_pipeline_bind_map *bind_map);
1063
1064struct anv_shader_bin *
1065anv_device_search_for_kernel(struct anv_device *device,
1066                             struct anv_pipeline_cache *cache,
1067                             const void *key_data, uint32_t key_size,
1068                             bool *user_cache_bit);
1069
1070struct anv_shader_bin *
1071anv_device_upload_kernel(struct anv_device *device,
1072                         struct anv_pipeline_cache *cache,
1073                         const void *key_data, uint32_t key_size,
1074                         const void *kernel_data, uint32_t kernel_size,
1075                         const void *constant_data,
1076                         uint32_t constant_data_size,
1077                         const struct brw_stage_prog_data *prog_data,
1078                         uint32_t prog_data_size,
1079                         const struct nir_xfb_info *xfb_info,
1080                         const struct anv_pipeline_bind_map *bind_map);
1081
1082struct nir_shader;
1083struct nir_shader_compiler_options;
1084
1085struct nir_shader *
1086anv_device_search_for_nir(struct anv_device *device,
1087                          struct anv_pipeline_cache *cache,
1088                          const struct nir_shader_compiler_options *nir_options,
1089                          unsigned char sha1_key[20],
1090                          void *mem_ctx);
1091
1092void
1093anv_device_upload_nir(struct anv_device *device,
1094                      struct anv_pipeline_cache *cache,
1095                      const struct nir_shader *nir,
1096                      unsigned char sha1_key[20]);
1097
1098struct anv_device {
1099    VK_LOADER_DATA                              _loader_data;
1100
1101    VkAllocationCallbacks                       alloc;
1102
1103    struct anv_instance *                       instance;
1104    uint32_t                                    chipset_id;
1105    bool                                        no_hw;
1106    struct gen_device_info                      info;
1107    struct isl_device                           isl_dev;
1108    int                                         context_id;
1109    int                                         fd;
1110    bool                                        can_chain_batches;
1111    bool                                        robust_buffer_access;
1112    struct anv_device_extension_table           enabled_extensions;
1113    struct anv_device_dispatch_table            dispatch;
1114
1115    pthread_mutex_t                             vma_mutex;
1116    struct util_vma_heap                        vma_lo;
1117    struct util_vma_heap                        vma_hi;
1118    uint64_t                                    vma_lo_available;
1119    uint64_t                                    vma_hi_available;
1120
1121    /** List of all anv_device_memory objects */
1122    struct list_head                            memory_objects;
1123
1124    struct anv_bo_pool                          batch_bo_pool;
1125
1126    struct anv_bo_cache                         bo_cache;
1127
1128    struct anv_state_pool                       dynamic_state_pool;
1129    struct anv_state_pool                       instruction_state_pool;
1130    struct anv_state_pool                       binding_table_pool;
1131    struct anv_state_pool                       surface_state_pool;
1132
1133    struct anv_bo                               workaround_bo;
1134    struct anv_bo                               trivial_batch_bo;
1135    struct anv_bo                               hiz_clear_bo;
1136
1137    struct anv_pipeline_cache                   default_pipeline_cache;
1138    struct blorp_context                        blorp;
1139
1140    struct anv_state                            border_colors;
1141
1142    struct anv_queue                            queue;
1143
1144    struct anv_scratch_pool                     scratch_pool;
1145
1146    uint32_t                                    default_mocs;
1147    uint32_t                                    external_mocs;
1148
1149    pthread_mutex_t                             mutex;
1150    pthread_cond_t                              queue_submit;
1151    bool                                        _lost;
1152
1153    struct gen_batch_decode_ctx                 decoder_ctx;
1154    /*
1155     * When decoding a anv_cmd_buffer, we might need to search for BOs through
1156     * the cmd_buffer's list.
1157     */
1158    struct anv_cmd_buffer                      *cmd_buffer_being_decoded;
1159};
1160
1161static inline struct anv_state_pool *
1162anv_binding_table_pool(struct anv_device *device)
1163{
1164   if (device->instance->physicalDevice.use_softpin)
1165      return &device->binding_table_pool;
1166   else
1167      return &device->surface_state_pool;
1168}
1169
1170static inline struct anv_state
1171anv_binding_table_pool_alloc(struct anv_device *device) {
1172   if (device->instance->physicalDevice.use_softpin)
1173      return anv_state_pool_alloc(&device->binding_table_pool,
1174                                  device->binding_table_pool.block_size, 0);
1175   else
1176      return anv_state_pool_alloc_back(&device->surface_state_pool);
1177}
1178
1179static inline void
1180anv_binding_table_pool_free(struct anv_device *device, struct anv_state state) {
1181   anv_state_pool_free(anv_binding_table_pool(device), state);
1182}
1183
1184static inline uint32_t
1185anv_mocs_for_bo(const struct anv_device *device, const struct anv_bo *bo)
1186{
1187   if (bo->flags & ANV_BO_EXTERNAL)
1188      return device->external_mocs;
1189   else
1190      return device->default_mocs;
1191}
1192
1193void anv_device_init_blorp(struct anv_device *device);
1194void anv_device_finish_blorp(struct anv_device *device);
1195
1196VkResult _anv_device_set_lost(struct anv_device *device,
1197                              const char *file, int line,
1198                              const char *msg, ...);
1199#define anv_device_set_lost(dev, ...) \
1200   _anv_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
1201
1202static inline bool
1203anv_device_is_lost(struct anv_device *device)
1204{
1205   return unlikely(device->_lost);
1206}
1207
1208VkResult anv_device_execbuf(struct anv_device *device,
1209                            struct drm_i915_gem_execbuffer2 *execbuf,
1210                            struct anv_bo **execbuf_bos);
1211VkResult anv_device_query_status(struct anv_device *device);
1212VkResult anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo);
1213VkResult anv_device_wait(struct anv_device *device, struct anv_bo *bo,
1214                         int64_t timeout);
1215
1216void* anv_gem_mmap(struct anv_device *device,
1217                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
1218void anv_gem_munmap(void *p, uint64_t size);
1219uint32_t anv_gem_create(struct anv_device *device, uint64_t size);
1220void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
1221uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
1222int anv_gem_busy(struct anv_device *device, uint32_t gem_handle);
1223int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
1224int anv_gem_execbuffer(struct anv_device *device,
1225                       struct drm_i915_gem_execbuffer2 *execbuf);
1226int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
1227                       uint32_t stride, uint32_t tiling);
1228int anv_gem_create_context(struct anv_device *device);
1229bool anv_gem_has_context_priority(int fd);
1230int anv_gem_destroy_context(struct anv_device *device, int context);
1231int anv_gem_set_context_param(int fd, int context, uint32_t param,
1232                              uint64_t value);
1233int anv_gem_get_context_param(int fd, int context, uint32_t param,
1234                              uint64_t *value);
1235int anv_gem_get_param(int fd, uint32_t param);
1236int anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle);
1237bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
1238int anv_gem_get_aperture(int fd, uint64_t *size);
1239int anv_gem_gpu_get_reset_stats(struct anv_device *device,
1240                                uint32_t *active, uint32_t *pending);
1241int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
1242int anv_gem_reg_read(struct anv_device *device,
1243                     uint32_t offset, uint64_t *result);
1244uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
1245int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
1246int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
1247                       uint32_t read_domains, uint32_t write_domain);
1248int anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2);
1249uint32_t anv_gem_syncobj_create(struct anv_device *device, uint32_t flags);
1250void anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle);
1251int anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle);
1252uint32_t anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd);
1253int anv_gem_syncobj_export_sync_file(struct anv_device *device,
1254                                     uint32_t handle);
1255int anv_gem_syncobj_import_sync_file(struct anv_device *device,
1256                                     uint32_t handle, int fd);
1257void anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle);
1258bool anv_gem_supports_syncobj_wait(int fd);
1259int anv_gem_syncobj_wait(struct anv_device *device,
1260                         uint32_t *handles, uint32_t num_handles,
1261                         int64_t abs_timeout_ns, bool wait_all);
1262
1263bool anv_vma_alloc(struct anv_device *device, struct anv_bo *bo);
1264void anv_vma_free(struct anv_device *device, struct anv_bo *bo);
1265
1266VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
1267
1268struct anv_reloc_list {
1269   uint32_t                                     num_relocs;
1270   uint32_t                                     array_length;
1271   struct drm_i915_gem_relocation_entry *       relocs;
1272   struct anv_bo **                             reloc_bos;
1273   struct set *                                 deps;
1274};
1275
1276VkResult anv_reloc_list_init(struct anv_reloc_list *list,
1277                             const VkAllocationCallbacks *alloc);
1278void anv_reloc_list_finish(struct anv_reloc_list *list,
1279                           const VkAllocationCallbacks *alloc);
1280
1281VkResult anv_reloc_list_add(struct anv_reloc_list *list,
1282                            const VkAllocationCallbacks *alloc,
1283                            uint32_t offset, struct anv_bo *target_bo,
1284                            uint32_t delta);
1285
1286struct anv_batch_bo {
1287   /* Link in the anv_cmd_buffer.owned_batch_bos list */
1288   struct list_head                             link;
1289
1290   struct anv_bo                                bo;
1291
1292   /* Bytes actually consumed in this batch BO */
1293   uint32_t                                     length;
1294
1295   struct anv_reloc_list                        relocs;
1296};
1297
1298struct anv_batch {
1299   const VkAllocationCallbacks *                alloc;
1300
1301   void *                                       start;
1302   void *                                       end;
1303   void *                                       next;
1304
1305   struct anv_reloc_list *                      relocs;
1306
1307   /* This callback is called (with the associated user data) in the event
1308    * that the batch runs out of space.
1309    */
1310   VkResult (*extend_cb)(struct anv_batch *, void *);
1311   void *                                       user_data;
1312
1313   /**
1314    * Current error status of the command buffer. Used to track inconsistent
1315    * or incomplete command buffer states that are the consequence of run-time
1316    * errors such as out of memory scenarios. We want to track this in the
1317    * batch because the command buffer object is not visible to some parts
1318    * of the driver.
1319    */
1320   VkResult                                     status;
1321};
1322
1323void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
1324void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
1325uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
1326                              void *location, struct anv_bo *bo, uint32_t offset);
1327VkResult anv_device_submit_simple_batch(struct anv_device *device,
1328                                        struct anv_batch *batch);
1329
1330static inline VkResult
1331anv_batch_set_error(struct anv_batch *batch, VkResult error)
1332{
1333   assert(error != VK_SUCCESS);
1334   if (batch->status == VK_SUCCESS)
1335      batch->status = error;
1336   return batch->status;
1337}
1338
1339static inline bool
1340anv_batch_has_error(struct anv_batch *batch)
1341{
1342   return batch->status != VK_SUCCESS;
1343}
1344
1345struct anv_address {
1346   struct anv_bo *bo;
1347   uint32_t offset;
1348};
1349
1350#define ANV_NULL_ADDRESS ((struct anv_address) { NULL, 0 })
1351
1352static inline bool
1353anv_address_is_null(struct anv_address addr)
1354{
1355   return addr.bo == NULL && addr.offset == 0;
1356}
1357
1358static inline uint64_t
1359anv_address_physical(struct anv_address addr)
1360{
1361   if (addr.bo && (addr.bo->flags & EXEC_OBJECT_PINNED))
1362      return gen_canonical_address(addr.bo->offset + addr.offset);
1363   else
1364      return gen_canonical_address(addr.offset);
1365}
1366
1367static inline struct anv_address
1368anv_address_add(struct anv_address addr, uint64_t offset)
1369{
1370   addr.offset += offset;
1371   return addr;
1372}
1373
1374static inline void
1375write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
1376{
1377   unsigned reloc_size = 0;
1378   if (device->info.gen >= 8) {
1379      reloc_size = sizeof(uint64_t);
1380      *(uint64_t *)p = gen_canonical_address(v);
1381   } else {
1382      reloc_size = sizeof(uint32_t);
1383      *(uint32_t *)p = v;
1384   }
1385
1386   if (flush && !device->info.has_llc)
1387      gen_flush_range(p, reloc_size);
1388}
1389
1390static inline uint64_t
1391_anv_combine_address(struct anv_batch *batch, void *location,
1392                     const struct anv_address address, uint32_t delta)
1393{
1394   if (address.bo == NULL) {
1395      return address.offset + delta;
1396   } else {
1397      assert(batch->start <= location && location < batch->end);
1398
1399      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
1400   }
1401}
1402
1403#define __gen_address_type struct anv_address
1404#define __gen_user_data struct anv_batch
1405#define __gen_combine_address _anv_combine_address
1406
1407/* Wrapper macros needed to work around preprocessor argument issues.  In
1408 * particular, arguments don't get pre-evaluated if they are concatenated.
1409 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
1410 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
1411 * We can work around this easily enough with these helpers.
1412 */
1413#define __anv_cmd_length(cmd) cmd ## _length
1414#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
1415#define __anv_cmd_header(cmd) cmd ## _header
1416#define __anv_cmd_pack(cmd) cmd ## _pack
1417#define __anv_reg_num(reg) reg ## _num
1418
1419#define anv_pack_struct(dst, struc, ...) do {                              \
1420      struct struc __template = {                                          \
1421         __VA_ARGS__                                                       \
1422      };                                                                   \
1423      __anv_cmd_pack(struc)(NULL, dst, &__template);                       \
1424      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
1425   } while (0)
1426
1427#define anv_batch_emitn(batch, n, cmd, ...) ({             \
1428      void *__dst = anv_batch_emit_dwords(batch, n);       \
1429      if (__dst) {                                         \
1430         struct cmd __template = {                         \
1431            __anv_cmd_header(cmd),                         \
1432           .DWordLength = n - __anv_cmd_length_bias(cmd),  \
1433            __VA_ARGS__                                    \
1434         };                                                \
1435         __anv_cmd_pack(cmd)(batch, __dst, &__template);   \
1436      }                                                    \
1437      __dst;                                               \
1438   })
1439
1440#define anv_batch_emit_merge(batch, dwords0, dwords1)                   \
1441   do {                                                                 \
1442      uint32_t *dw;                                                     \
1443                                                                        \
1444      STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1));        \
1445      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0));         \
1446      if (!dw)                                                          \
1447         break;                                                         \
1448      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++)                \
1449         dw[i] = (dwords0)[i] | (dwords1)[i];                           \
1450      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
1451   } while (0)
1452
1453#define anv_batch_emit(batch, cmd, name)                            \
1454   for (struct cmd name = { __anv_cmd_header(cmd) },                    \
1455        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd));    \
1456        __builtin_expect(_dst != NULL, 1);                              \
1457        ({ __anv_cmd_pack(cmd)(batch, _dst, &name);                     \
1458           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
1459           _dst = NULL;                                                 \
1460         }))
1461
1462/* MEMORY_OBJECT_CONTROL_STATE:
1463 * .GraphicsDataTypeGFDT                        = 0,
1464 * .LLCCacheabilityControlLLCCC                 = 0,
1465 * .L3CacheabilityControlL3CC                   = 1,
1466 */
1467#define GEN7_MOCS 1
1468
1469/* MEMORY_OBJECT_CONTROL_STATE:
1470 * .LLCeLLCCacheabilityControlLLCCC             = 0,
1471 * .L3CacheabilityControlL3CC                   = 1,
1472 */
1473#define GEN75_MOCS 1
1474
1475/* MEMORY_OBJECT_CONTROL_STATE:
1476 * .MemoryTypeLLCeLLCCacheabilityControl = WB,
1477 * .TargetCache = L3DefertoPATforLLCeLLCselection,
1478 * .AgeforQUADLRU = 0
1479 */
1480#define GEN8_MOCS 0x78
1481
1482/* MEMORY_OBJECT_CONTROL_STATE:
1483 * .MemoryTypeLLCeLLCCacheabilityControl = UCwithFenceifcoherentcycle,
1484 * .TargetCache = L3DefertoPATforLLCeLLCselection,
1485 * .AgeforQUADLRU = 0
1486 */
1487#define GEN8_EXTERNAL_MOCS 0x18
1488
1489/* Skylake: MOCS is now an index into an array of 62 different caching
1490 * configurations programmed by the kernel.
1491 */
1492
1493/* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */
1494#define GEN9_MOCS (2 << 1)
1495
1496/* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */
1497#define GEN9_EXTERNAL_MOCS (1 << 1)
1498
1499/* Cannonlake MOCS defines are duplicates of Skylake MOCS defines. */
1500#define GEN10_MOCS GEN9_MOCS
1501#define GEN10_EXTERNAL_MOCS GEN9_EXTERNAL_MOCS
1502
1503/* Ice Lake MOCS defines are duplicates of Skylake MOCS defines. */
1504#define GEN11_MOCS GEN9_MOCS
1505#define GEN11_EXTERNAL_MOCS GEN9_EXTERNAL_MOCS
1506
1507struct anv_device_memory {
1508   struct list_head                             link;
1509
1510   struct anv_bo *                              bo;
1511   struct anv_memory_type *                     type;
1512   VkDeviceSize                                 map_size;
1513   void *                                       map;
1514
1515   /* If set, we are holding reference to AHardwareBuffer
1516    * which we must release when memory is freed.
1517    */
1518   struct AHardwareBuffer *                     ahw;
1519
1520   /* If set, this memory comes from a host pointer. */
1521   void *                                       host_ptr;
1522};
1523
1524/**
1525 * Header for Vertex URB Entry (VUE)
1526 */
1527struct anv_vue_header {
1528   uint32_t Reserved;
1529   uint32_t RTAIndex; /* RenderTargetArrayIndex */
1530   uint32_t ViewportIndex;
1531   float PointWidth;
1532};
1533
1534/** Struct representing a sampled image descriptor
1535 *
1536 * This descriptor layout is used for sampled images, bare sampler, and
1537 * combined image/sampler descriptors.
1538 */
1539struct anv_sampled_image_descriptor {
1540   /** Bindless image handle
1541    *
1542    * This is expected to already be shifted such that the 20-bit
1543    * SURFACE_STATE table index is in the top 20 bits.
1544    */
1545   uint32_t image;
1546
1547   /** Bindless sampler handle
1548    *
1549    * This is assumed to be a 32B-aligned SAMPLER_STATE pointer relative
1550    * to the dynamic state base address.
1551    */
1552   uint32_t sampler;
1553};
1554
1555struct anv_texture_swizzle_descriptor {
1556   /** Texture swizzle
1557    *
1558    * See also nir_intrinsic_channel_select_intel
1559    */
1560   uint8_t swizzle[4];
1561
1562   /** Unused padding to ensure the struct is a multiple of 64 bits */
1563   uint32_t _pad;
1564};
1565
1566/** Struct representing a storage image descriptor */
1567struct anv_storage_image_descriptor {
1568   /** Bindless image handles
1569    *
1570    * These are expected to already be shifted such that the 20-bit
1571    * SURFACE_STATE table index is in the top 20 bits.
1572    */
1573   uint32_t read_write;
1574   uint32_t write_only;
1575};
1576
1577/** Struct representing a address/range descriptor
1578 *
1579 * The fields of this struct correspond directly to the data layout of
1580 * nir_address_format_64bit_bounded_global addresses.  The last field is the
1581 * offset in the NIR address so it must be zero so that when you load the
1582 * descriptor you get a pointer to the start of the range.
1583 */
1584struct anv_address_range_descriptor {
1585   uint64_t address;
1586   uint32_t range;
1587   uint32_t zero;
1588};
1589
1590enum anv_descriptor_data {
1591   /** The descriptor contains a BTI reference to a surface state */
1592   ANV_DESCRIPTOR_SURFACE_STATE  = (1 << 0),
1593   /** The descriptor contains a BTI reference to a sampler state */
1594   ANV_DESCRIPTOR_SAMPLER_STATE  = (1 << 1),
1595   /** The descriptor contains an actual buffer view */
1596   ANV_DESCRIPTOR_BUFFER_VIEW    = (1 << 2),
1597   /** The descriptor contains auxiliary image layout data */
1598   ANV_DESCRIPTOR_IMAGE_PARAM    = (1 << 3),
1599   /** The descriptor contains auxiliary image layout data */
1600   ANV_DESCRIPTOR_INLINE_UNIFORM = (1 << 4),
1601   /** anv_address_range_descriptor with a buffer address and range */
1602   ANV_DESCRIPTOR_ADDRESS_RANGE  = (1 << 5),
1603   /** Bindless surface handle */
1604   ANV_DESCRIPTOR_SAMPLED_IMAGE  = (1 << 6),
1605   /** Storage image handles */
1606   ANV_DESCRIPTOR_STORAGE_IMAGE  = (1 << 7),
1607   /** Storage image handles */
1608   ANV_DESCRIPTOR_TEXTURE_SWIZZLE  = (1 << 8),
1609};
1610
1611struct anv_descriptor_set_binding_layout {
1612#ifndef NDEBUG
1613   /* The type of the descriptors in this binding */
1614   VkDescriptorType type;
1615#endif
1616
1617   /* Flags provided when this binding was created */
1618   VkDescriptorBindingFlagsEXT flags;
1619
1620   /* Bitfield representing the type of data this descriptor contains */
1621   enum anv_descriptor_data data;
1622
1623   /* Maximum number of YCbCr texture/sampler planes */
1624   uint8_t max_plane_count;
1625
1626   /* Number of array elements in this binding (or size in bytes for inline
1627    * uniform data)
1628    */
1629   uint16_t array_size;
1630
1631   /* Index into the flattend descriptor set */
1632   uint16_t descriptor_index;
1633
1634   /* Index into the dynamic state array for a dynamic buffer */
1635   int16_t dynamic_offset_index;
1636
1637   /* Index into the descriptor set buffer views */
1638   int16_t buffer_view_index;
1639
1640   /* Offset into the descriptor buffer where this descriptor lives */
1641   uint32_t descriptor_offset;
1642
1643   /* Immutable samplers (or NULL if no immutable samplers) */
1644   struct anv_sampler **immutable_samplers;
1645};
1646
1647unsigned anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout);
1648
1649unsigned anv_descriptor_type_size(const struct anv_physical_device *pdevice,
1650                                  VkDescriptorType type);
1651
1652bool anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
1653                                      const struct anv_descriptor_set_binding_layout *binding,
1654                                      bool sampler);
1655
1656bool anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
1657                                      const struct anv_descriptor_set_binding_layout *binding,
1658                                      bool sampler);
1659
1660struct anv_descriptor_set_layout {
1661   /* Descriptor set layouts can be destroyed at almost any time */
1662   uint32_t ref_cnt;
1663
1664   /* Number of bindings in this descriptor set */
1665   uint16_t binding_count;
1666
1667   /* Total size of the descriptor set with room for all array entries */
1668   uint16_t size;
1669
1670   /* Shader stages affected by this descriptor set */
1671   uint16_t shader_stages;
1672
1673   /* Number of buffer views in this descriptor set */
1674   uint16_t buffer_view_count;
1675
1676   /* Number of dynamic offsets used by this descriptor set */
1677   uint16_t dynamic_offset_count;
1678
1679   /* Size of the descriptor buffer for this descriptor set */
1680   uint32_t descriptor_buffer_size;
1681
1682   /* Bindings in this descriptor set */
1683   struct anv_descriptor_set_binding_layout binding[0];
1684};
1685
1686static inline void
1687anv_descriptor_set_layout_ref(struct anv_descriptor_set_layout *layout)
1688{
1689   assert(layout && layout->ref_cnt >= 1);
1690   p_atomic_inc(&layout->ref_cnt);
1691}
1692
1693static inline void
1694anv_descriptor_set_layout_unref(struct anv_device *device,
1695                                struct anv_descriptor_set_layout *layout)
1696{
1697   assert(layout && layout->ref_cnt >= 1);
1698   if (p_atomic_dec_zero(&layout->ref_cnt))
1699      vk_free(&device->alloc, layout);
1700}
1701
1702struct anv_descriptor {
1703   VkDescriptorType type;
1704
1705   union {
1706      struct {
1707         VkImageLayout layout;
1708         struct anv_image_view *image_view;
1709         struct anv_sampler *sampler;
1710      };
1711
1712      struct {
1713         struct anv_buffer *buffer;
1714         uint64_t offset;
1715         uint64_t range;
1716      };
1717
1718      struct anv_buffer_view *buffer_view;
1719   };
1720};
1721
1722struct anv_descriptor_set {
1723   struct anv_descriptor_pool *pool;
1724   struct anv_descriptor_set_layout *layout;
1725   uint32_t size;
1726
1727   /* State relative to anv_descriptor_pool::bo */
1728   struct anv_state desc_mem;
1729   /* Surface state for the descriptor buffer */
1730   struct anv_state desc_surface_state;
1731
1732   uint32_t buffer_view_count;
1733   struct anv_buffer_view *buffer_views;
1734
1735   /* Link to descriptor pool's desc_sets list . */
1736   struct list_head pool_link;
1737
1738   struct anv_descriptor descriptors[0];
1739};
1740
1741struct anv_buffer_view {
1742   enum isl_format format; /**< VkBufferViewCreateInfo::format */
1743   uint64_t range; /**< VkBufferViewCreateInfo::range */
1744
1745   struct anv_address address;
1746
1747   struct anv_state surface_state;
1748   struct anv_state storage_surface_state;
1749   struct anv_state writeonly_storage_surface_state;
1750
1751   struct brw_image_param storage_image_param;
1752};
1753
1754struct anv_push_descriptor_set {
1755   struct anv_descriptor_set set;
1756
1757   /* Put this field right behind anv_descriptor_set so it fills up the
1758    * descriptors[0] field. */
1759   struct anv_descriptor descriptors[MAX_PUSH_DESCRIPTORS];
1760
1761   /** True if the descriptor set buffer has been referenced by a draw or
1762    * dispatch command.
1763    */
1764   bool set_used_on_gpu;
1765
1766   struct anv_buffer_view buffer_views[MAX_PUSH_DESCRIPTORS];
1767};
1768
1769struct anv_descriptor_pool {
1770   uint32_t size;
1771   uint32_t next;
1772   uint32_t free_list;
1773
1774   struct anv_bo bo;
1775   struct util_vma_heap bo_heap;
1776
1777   struct anv_state_stream surface_state_stream;
1778   void *surface_state_free_list;
1779
1780   struct list_head desc_sets;
1781
1782   char data[0];
1783};
1784
1785enum anv_descriptor_template_entry_type {
1786   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE,
1787   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER,
1788   ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
1789};
1790
1791struct anv_descriptor_template_entry {
1792   /* The type of descriptor in this entry */
1793   VkDescriptorType type;
1794
1795   /* Binding in the descriptor set */
1796   uint32_t binding;
1797
1798   /* Offset at which to write into the descriptor set binding */
1799   uint32_t array_element;
1800
1801   /* Number of elements to write into the descriptor set binding */
1802   uint32_t array_count;
1803
1804   /* Offset into the user provided data */
1805   size_t offset;
1806
1807   /* Stride between elements into the user provided data */
1808   size_t stride;
1809};
1810
1811struct anv_descriptor_update_template {
1812    VkPipelineBindPoint bind_point;
1813
1814   /* The descriptor set this template corresponds to. This value is only
1815    * valid if the template was created with the templateType
1816    * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET.
1817    */
1818   uint8_t set;
1819
1820   /* Number of entries in this template */
1821   uint32_t entry_count;
1822
1823   /* Entries of the template */
1824   struct anv_descriptor_template_entry entries[0];
1825};
1826
1827size_t
1828anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout);
1829
1830void
1831anv_descriptor_set_write_image_view(struct anv_device *device,
1832                                    struct anv_descriptor_set *set,
1833                                    const VkDescriptorImageInfo * const info,
1834                                    VkDescriptorType type,
1835                                    uint32_t binding,
1836                                    uint32_t element);
1837
1838void
1839anv_descriptor_set_write_buffer_view(struct anv_device *device,
1840                                     struct anv_descriptor_set *set,
1841                                     VkDescriptorType type,
1842                                     struct anv_buffer_view *buffer_view,
1843                                     uint32_t binding,
1844                                     uint32_t element);
1845
1846void
1847anv_descriptor_set_write_buffer(struct anv_device *device,
1848                                struct anv_descriptor_set *set,
1849                                struct anv_state_stream *alloc_stream,
1850                                VkDescriptorType type,
1851                                struct anv_buffer *buffer,
1852                                uint32_t binding,
1853                                uint32_t element,
1854                                VkDeviceSize offset,
1855                                VkDeviceSize range);
1856void
1857anv_descriptor_set_write_inline_uniform_data(struct anv_device *device,
1858                                             struct anv_descriptor_set *set,
1859                                             uint32_t binding,
1860                                             const void *data,
1861                                             size_t offset,
1862                                             size_t size);
1863
1864void
1865anv_descriptor_set_write_template(struct anv_device *device,
1866                                  struct anv_descriptor_set *set,
1867                                  struct anv_state_stream *alloc_stream,
1868                                  const struct anv_descriptor_update_template *template,
1869                                  const void *data);
1870
1871VkResult
1872anv_descriptor_set_create(struct anv_device *device,
1873                          struct anv_descriptor_pool *pool,
1874                          struct anv_descriptor_set_layout *layout,
1875                          struct anv_descriptor_set **out_set);
1876
1877void
1878anv_descriptor_set_destroy(struct anv_device *device,
1879                           struct anv_descriptor_pool *pool,
1880                           struct anv_descriptor_set *set);
1881
1882#define ANV_DESCRIPTOR_SET_DESCRIPTORS      (UINT8_MAX - 3)
1883#define ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS  (UINT8_MAX - 2)
1884#define ANV_DESCRIPTOR_SET_SHADER_CONSTANTS (UINT8_MAX - 1)
1885#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
1886
1887struct anv_pipeline_binding {
1888   /* The descriptor set this surface corresponds to.  The special value of
1889    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
1890    * to a color attachment and not a regular descriptor.
1891    */
1892   uint8_t set;
1893
1894   /* Binding in the descriptor set */
1895   uint32_t binding;
1896
1897   /* Index in the binding */
1898   uint32_t index;
1899
1900   /* Plane in the binding index */
1901   uint8_t plane;
1902
1903   /* Input attachment index (relative to the subpass) */
1904   uint8_t input_attachment_index;
1905
1906   /* For a storage image, whether it is write-only */
1907   bool write_only;
1908};
1909
1910struct anv_pipeline_layout {
1911   struct {
1912      struct anv_descriptor_set_layout *layout;
1913      uint32_t dynamic_offset_start;
1914   } set[MAX_SETS];
1915
1916   uint32_t num_sets;
1917
1918   unsigned char sha1[20];
1919};
1920
1921struct anv_buffer {
1922   struct anv_device *                          device;
1923   VkDeviceSize                                 size;
1924
1925   VkBufferUsageFlags                           usage;
1926
1927   /* Set when bound */
1928   struct anv_address                           address;
1929};
1930
1931static inline uint64_t
1932anv_buffer_get_range(struct anv_buffer *buffer, uint64_t offset, uint64_t range)
1933{
1934   assert(offset <= buffer->size);
1935   if (range == VK_WHOLE_SIZE) {
1936      return buffer->size - offset;
1937   } else {
1938      assert(range + offset >= range);
1939      assert(range + offset <= buffer->size);
1940      return range;
1941   }
1942}
1943
1944enum anv_cmd_dirty_bits {
1945   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT                  = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
1946   ANV_CMD_DIRTY_DYNAMIC_SCISSOR                   = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
1947   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH                = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
1948   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS                = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
1949   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS           = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
1950   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS              = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
1951   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK      = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
1952   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK        = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
1953   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE         = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
1954   ANV_CMD_DIRTY_DYNAMIC_ALL                       = (1 << 9) - 1,
1955   ANV_CMD_DIRTY_PIPELINE                          = 1 << 9,
1956   ANV_CMD_DIRTY_INDEX_BUFFER                      = 1 << 10,
1957   ANV_CMD_DIRTY_RENDER_TARGETS                    = 1 << 11,
1958   ANV_CMD_DIRTY_XFB_ENABLE                        = 1 << 12,
1959};
1960typedef uint32_t anv_cmd_dirty_mask_t;
1961
1962enum anv_pipe_bits {
1963   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT            = (1 << 0),
1964   ANV_PIPE_STALL_AT_SCOREBOARD_BIT          = (1 << 1),
1965   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT       = (1 << 2),
1966   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT    = (1 << 3),
1967   ANV_PIPE_VF_CACHE_INVALIDATE_BIT          = (1 << 4),
1968   ANV_PIPE_DATA_CACHE_FLUSH_BIT             = (1 << 5),
1969   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT     = (1 << 10),
1970   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
1971   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT    = (1 << 12),
1972   ANV_PIPE_DEPTH_STALL_BIT                  = (1 << 13),
1973   ANV_PIPE_CS_STALL_BIT                     = (1 << 20),
1974
1975   /* This bit does not exist directly in PIPE_CONTROL.  Instead it means that
1976    * a flush has happened but not a CS stall.  The next time we do any sort
1977    * of invalidation we need to insert a CS stall at that time.  Otherwise,
1978    * we would have to CS stall on every flush which could be bad.
1979    */
1980   ANV_PIPE_NEEDS_CS_STALL_BIT               = (1 << 21),
1981
1982   /* This bit does not exist directly in PIPE_CONTROL. It means that render
1983    * target operations related to transfer commands with VkBuffer as
1984    * destination are ongoing. Some operations like copies on the command
1985    * streamer might need to be aware of this to trigger the appropriate stall
1986    * before they can proceed with the copy.
1987    */
1988   ANV_PIPE_RENDER_TARGET_BUFFER_WRITES      = (1 << 22),
1989};
1990
1991#define ANV_PIPE_FLUSH_BITS ( \
1992   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
1993   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1994   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
1995
1996#define ANV_PIPE_STALL_BITS ( \
1997   ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
1998   ANV_PIPE_DEPTH_STALL_BIT | \
1999   ANV_PIPE_CS_STALL_BIT)
2000
2001#define ANV_PIPE_INVALIDATE_BITS ( \
2002   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
2003   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
2004   ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
2005   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
2006   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
2007   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
2008
2009static inline enum anv_pipe_bits
2010anv_pipe_flush_bits_for_access_flags(VkAccessFlags flags)
2011{
2012   enum anv_pipe_bits pipe_bits = 0;
2013
2014   unsigned b;
2015   for_each_bit(b, flags) {
2016      switch ((VkAccessFlagBits)(1 << b)) {
2017      case VK_ACCESS_SHADER_WRITE_BIT:
2018         /* We're transitioning a buffer that was previously used as write
2019          * destination through the data port. To make its content available
2020          * to future operations, flush the data cache.
2021          */
2022         pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
2023         break;
2024      case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
2025         /* We're transitioning a buffer that was previously used as render
2026          * target. To make its content available to future operations, flush
2027          * the render target cache.
2028          */
2029         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
2030         break;
2031      case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
2032         /* We're transitioning a buffer that was previously used as depth
2033          * buffer. To make its content available to future operations, flush
2034          * the depth cache.
2035          */
2036         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
2037         break;
2038      case VK_ACCESS_TRANSFER_WRITE_BIT:
2039         /* We're transitioning a buffer that was previously used as a
2040          * transfer write destination. Generic write operations include color
2041          * & depth operations as well as buffer operations like :
2042          *     - vkCmdClearColorImage()
2043          *     - vkCmdClearDepthStencilImage()
2044          *     - vkCmdBlitImage()
2045          *     - vkCmdCopy*(), vkCmdUpdate*(), vkCmdFill*()
2046          *
2047          * Most of these operations are implemented using Blorp which writes
2048          * through the render target, so flush that cache to make it visible
2049          * to future operations. And for depth related operations we also
2050          * need to flush the depth cache.
2051          */
2052         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
2053         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
2054         break;
2055      case VK_ACCESS_MEMORY_WRITE_BIT:
2056         /* We're transitioning a buffer for generic write operations. Flush
2057          * all the caches.
2058          */
2059         pipe_bits |= ANV_PIPE_FLUSH_BITS;
2060         break;
2061      default:
2062         break; /* Nothing to do */
2063      }
2064   }
2065
2066   return pipe_bits;
2067}
2068
2069static inline enum anv_pipe_bits
2070anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags)
2071{
2072   enum anv_pipe_bits pipe_bits = 0;
2073
2074   unsigned b;
2075   for_each_bit(b, flags) {
2076      switch ((VkAccessFlagBits)(1 << b)) {
2077      case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
2078         /* Indirect draw commands take a buffer as input that we're going to
2079          * read from the command streamer to load some of the HW registers
2080          * (see genX_cmd_buffer.c:load_indirect_parameters). This requires a
2081          * command streamer stall so that all the cache flushes have
2082          * completed before the command streamer loads from memory.
2083          */
2084         pipe_bits |=  ANV_PIPE_CS_STALL_BIT;
2085         /* Indirect draw commands also set gl_BaseVertex & gl_BaseIndex
2086          * through a vertex buffer, so invalidate that cache.
2087          */
2088         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
2089         /* For CmdDipatchIndirect, we also load gl_NumWorkGroups through a
2090          * UBO from the buffer, so we need to invalidate constant cache.
2091          */
2092         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
2093         break;
2094      case VK_ACCESS_INDEX_READ_BIT:
2095      case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
2096         /* We transitioning a buffer to be used for as input for vkCmdDraw*
2097          * commands, so we invalidate the VF cache to make sure there is no
2098          * stale data when we start rendering.
2099          */
2100         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
2101         break;
2102      case VK_ACCESS_UNIFORM_READ_BIT:
2103         /* We transitioning a buffer to be used as uniform data. Because
2104          * uniform is accessed through the data port & sampler, we need to
2105          * invalidate the texture cache (sampler) & constant cache (data
2106          * port) to avoid stale data.
2107          */
2108         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
2109         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
2110         break;
2111      case VK_ACCESS_SHADER_READ_BIT:
2112      case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
2113      case VK_ACCESS_TRANSFER_READ_BIT:
2114         /* Transitioning a buffer to be read through the sampler, so
2115          * invalidate the texture cache, we don't want any stale data.
2116          */
2117         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
2118         break;
2119      case VK_ACCESS_MEMORY_READ_BIT:
2120         /* Transitioning a buffer for generic read, invalidate all the
2121          * caches.
2122          */
2123         pipe_bits |= ANV_PIPE_INVALIDATE_BITS;
2124         break;
2125      case VK_ACCESS_MEMORY_WRITE_BIT:
2126         /* Generic write, make sure all previously written things land in
2127          * memory.
2128          */
2129         pipe_bits |= ANV_PIPE_FLUSH_BITS;
2130         break;
2131      case VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT:
2132         /* Transitioning a buffer for conditional rendering. We'll load the
2133          * content of this buffer into HW registers using the command
2134          * streamer, so we need to stall the command streamer to make sure
2135          * any in-flight flush operations have completed.
2136          */
2137         pipe_bits |= ANV_PIPE_CS_STALL_BIT;
2138         break;
2139      default:
2140         break; /* Nothing to do */
2141      }
2142   }
2143
2144   return pipe_bits;
2145}
2146
2147#define VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV (         \
2148   VK_IMAGE_ASPECT_COLOR_BIT | \
2149   VK_IMAGE_ASPECT_PLANE_0_BIT | \
2150   VK_IMAGE_ASPECT_PLANE_1_BIT | \
2151   VK_IMAGE_ASPECT_PLANE_2_BIT)
2152#define VK_IMAGE_ASPECT_PLANES_BITS_ANV ( \
2153   VK_IMAGE_ASPECT_PLANE_0_BIT | \
2154   VK_IMAGE_ASPECT_PLANE_1_BIT | \
2155   VK_IMAGE_ASPECT_PLANE_2_BIT)
2156
2157struct anv_vertex_binding {
2158   struct anv_buffer *                          buffer;
2159   VkDeviceSize                                 offset;
2160};
2161
2162struct anv_xfb_binding {
2163   struct anv_buffer *                          buffer;
2164   VkDeviceSize                                 offset;
2165   VkDeviceSize                                 size;
2166};
2167
2168#define ANV_PARAM_PUSH(offset)         ((1 << 16) | (uint32_t)(offset))
2169#define ANV_PARAM_IS_PUSH(param)       ((uint32_t)(param) >> 16 == 1)
2170#define ANV_PARAM_PUSH_OFFSET(param)   ((param) & 0xffff)
2171
2172#define ANV_PARAM_DYN_OFFSET(offset)      ((2 << 16) | (uint32_t)(offset))
2173#define ANV_PARAM_IS_DYN_OFFSET(param)    ((uint32_t)(param) >> 16 == 2)
2174#define ANV_PARAM_DYN_OFFSET_IDX(param)   ((param) & 0xffff)
2175
2176struct anv_push_constants {
2177   /* Push constant data provided by the client through vkPushConstants */
2178   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];
2179
2180   /* Used for vkCmdDispatchBase */
2181   uint32_t base_work_group_id[3];
2182};
2183
2184struct anv_dynamic_state {
2185   struct {
2186      uint32_t                                  count;
2187      VkViewport                                viewports[MAX_VIEWPORTS];
2188   } viewport;
2189
2190   struct {
2191      uint32_t                                  count;
2192      VkRect2D                                  scissors[MAX_SCISSORS];
2193   } scissor;
2194
2195   float                                        line_width;
2196
2197   struct {
2198      float                                     bias;
2199      float                                     clamp;
2200      float                                     slope;
2201   } depth_bias;
2202
2203   float                                        blend_constants[4];
2204
2205   struct {
2206      float                                     min;
2207      float                                     max;
2208   } depth_bounds;
2209
2210   struct {
2211      uint32_t                                  front;
2212      uint32_t                                  back;
2213   } stencil_compare_mask;
2214
2215   struct {
2216      uint32_t                                  front;
2217      uint32_t                                  back;
2218   } stencil_write_mask;
2219
2220   struct {
2221      uint32_t                                  front;
2222      uint32_t                                  back;
2223   } stencil_reference;
2224};
2225
2226extern const struct anv_dynamic_state default_dynamic_state;
2227
2228void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
2229                            const struct anv_dynamic_state *src,
2230                            uint32_t copy_mask);
2231
2232struct anv_surface_state {
2233   struct anv_state state;
2234   /** Address of the surface referred to by this state
2235    *
2236    * This address is relative to the start of the BO.
2237    */
2238   struct anv_address address;
2239   /* Address of the aux surface, if any
2240    *
2241    * This field is ANV_NULL_ADDRESS if and only if no aux surface exists.
2242    *
2243    * With the exception of gen8, the bottom 12 bits of this address' offset
2244    * include extra aux information.
2245    */
2246   struct anv_address aux_address;
2247   /* Address of the clear color, if any
2248    *
2249    * This address is relative to the start of the BO.
2250    */
2251   struct anv_address clear_address;
2252};
2253
2254/**
2255 * Attachment state when recording a renderpass instance.
2256 *
2257 * The clear value is valid only if there exists a pending clear.
2258 */
2259struct anv_attachment_state {
2260   enum isl_aux_usage                           aux_usage;
2261   enum isl_aux_usage                           input_aux_usage;
2262   struct anv_surface_state                     color;
2263   struct anv_surface_state                     input;
2264
2265   VkImageLayout                                current_layout;
2266   VkImageAspectFlags                           pending_clear_aspects;
2267   VkImageAspectFlags                           pending_load_aspects;
2268   bool                                         fast_clear;
2269   VkClearValue                                 clear_value;
2270   bool                                         clear_color_is_zero_one;
2271   bool                                         clear_color_is_zero;
2272
2273   /* When multiview is active, attachments with a renderpass clear
2274    * operation have their respective layers cleared on the first
2275    * subpass that uses them, and only in that subpass. We keep track
2276    * of this using a bitfield to indicate which layers of an attachment
2277    * have not been cleared yet when multiview is active.
2278    */
2279   uint32_t                                     pending_clear_views;
2280};
2281
2282/** State tracking for particular pipeline bind point
2283 *
2284 * This struct is the base struct for anv_cmd_graphics_state and
2285 * anv_cmd_compute_state.  These are used to track state which is bound to a
2286 * particular type of pipeline.  Generic state that applies per-stage such as
2287 * binding table offsets and push constants is tracked generically with a
2288 * per-stage array in anv_cmd_state.
2289 */
2290struct anv_cmd_pipeline_state {
2291   struct anv_pipeline *pipeline;
2292   struct anv_pipeline_layout *layout;
2293
2294   struct anv_descriptor_set *descriptors[MAX_SETS];
2295   uint32_t dynamic_offsets[MAX_DYNAMIC_BUFFERS];
2296
2297   struct anv_push_descriptor_set *push_descriptors[MAX_SETS];
2298};
2299
2300/** State tracking for graphics pipeline
2301 *
2302 * This has anv_cmd_pipeline_state as a base struct to track things which get
2303 * bound to a graphics pipeline.  Along with general pipeline bind point state
2304 * which is in the anv_cmd_pipeline_state base struct, it also contains other
2305 * state which is graphics-specific.
2306 */
2307struct anv_cmd_graphics_state {
2308   struct anv_cmd_pipeline_state base;
2309
2310   anv_cmd_dirty_mask_t dirty;
2311   uint32_t vb_dirty;
2312
2313   struct anv_dynamic_state dynamic;
2314
2315   struct {
2316      struct anv_buffer *index_buffer;
2317      uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
2318      uint32_t index_offset;
2319   } gen7;
2320};
2321
2322/** State tracking for compute pipeline
2323 *
2324 * This has anv_cmd_pipeline_state as a base struct to track things which get
2325 * bound to a compute pipeline.  Along with general pipeline bind point state
2326 * which is in the anv_cmd_pipeline_state base struct, it also contains other
2327 * state which is compute-specific.
2328 */
2329struct anv_cmd_compute_state {
2330   struct anv_cmd_pipeline_state base;
2331
2332   bool pipeline_dirty;
2333
2334   struct anv_address num_workgroups;
2335};
2336
2337/** State required while building cmd buffer */
2338struct anv_cmd_state {
2339   /* PIPELINE_SELECT.PipelineSelection */
2340   uint32_t                                     current_pipeline;
2341   const struct gen_l3_config *                 current_l3_config;
2342
2343   struct anv_cmd_graphics_state                gfx;
2344   struct anv_cmd_compute_state                 compute;
2345
2346   enum anv_pipe_bits                           pending_pipe_bits;
2347   VkShaderStageFlags                           descriptors_dirty;
2348   VkShaderStageFlags                           push_constants_dirty;
2349
2350   struct anv_framebuffer *                     framebuffer;
2351   struct anv_render_pass *                     pass;
2352   struct anv_subpass *                         subpass;
2353   VkRect2D                                     render_area;
2354   uint32_t                                     restart_index;
2355   struct anv_vertex_binding                    vertex_bindings[MAX_VBS];
2356   bool                                         xfb_enabled;
2357   struct anv_xfb_binding                       xfb_bindings[MAX_XFB_BUFFERS];
2358   VkShaderStageFlags                           push_constant_stages;
2359   struct anv_push_constants                    push_constants[MESA_SHADER_STAGES];
2360   struct anv_state                             binding_tables[MESA_SHADER_STAGES];
2361   struct anv_state                             samplers[MESA_SHADER_STAGES];
2362
2363   /**
2364    * Whether or not the gen8 PMA fix is enabled.  We ensure that, at the top
2365    * of any command buffer it is disabled by disabling it in EndCommandBuffer
2366    * and before invoking the secondary in ExecuteCommands.
2367    */
2368   bool                                         pma_fix_enabled;
2369
2370   /**
2371    * Whether or not we know for certain that HiZ is enabled for the current
2372    * subpass.  If, for whatever reason, we are unsure as to whether HiZ is
2373    * enabled or not, this will be false.
2374    */
2375   bool                                         hiz_enabled;
2376
2377   bool                                         conditional_render_enabled;
2378
2379   /**
2380    * Array length is anv_cmd_state::pass::attachment_count. Array content is
2381    * valid only when recording a render pass instance.
2382    */
2383   struct anv_attachment_state *                attachments;
2384
2385   /**
2386    * Surface states for color render targets.  These are stored in a single
2387    * flat array.  For depth-stencil attachments, the surface state is simply
2388    * left blank.
2389    */
2390   struct anv_state                             render_pass_states;
2391
2392   /**
2393    * A null surface state of the right size to match the framebuffer.  This
2394    * is one of the states in render_pass_states.
2395    */
2396   struct anv_state                             null_surface_state;
2397};
2398
2399struct anv_cmd_pool {
2400   VkAllocationCallbacks                        alloc;
2401   struct list_head                             cmd_buffers;
2402};
2403
2404#define ANV_CMD_BUFFER_BATCH_SIZE 8192
2405
2406enum anv_cmd_buffer_exec_mode {
2407   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
2408   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
2409   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
2410   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
2411   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
2412};
2413
2414struct anv_cmd_buffer {
2415   VK_LOADER_DATA                               _loader_data;
2416
2417   struct anv_device *                          device;
2418
2419   struct anv_cmd_pool *                        pool;
2420   struct list_head                             pool_link;
2421
2422   struct anv_batch                             batch;
2423
2424   /* Fields required for the actual chain of anv_batch_bo's.
2425    *
2426    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
2427    */
2428   struct list_head                             batch_bos;
2429   enum anv_cmd_buffer_exec_mode                exec_mode;
2430
2431   /* A vector of anv_batch_bo pointers for every batch or surface buffer
2432    * referenced by this command buffer
2433    *
2434    * initialized by anv_cmd_buffer_init_batch_bo_chain()
2435    */
2436   struct u_vector                            seen_bbos;
2437
2438   /* A vector of int32_t's for every block of binding tables.
2439    *
2440    * initialized by anv_cmd_buffer_init_batch_bo_chain()
2441    */
2442   struct u_vector                              bt_block_states;
2443   uint32_t                                     bt_next;
2444
2445   struct anv_reloc_list                        surface_relocs;
2446   /** Last seen surface state block pool center bo offset */
2447   uint32_t                                     last_ss_pool_center;
2448
2449   /* Serial for tracking buffer completion */
2450   uint32_t                                     serial;
2451
2452   /* Stream objects for storing temporary data */
2453   struct anv_state_stream                      surface_state_stream;
2454   struct anv_state_stream                      dynamic_state_stream;
2455
2456   VkCommandBufferUsageFlags                    usage_flags;
2457   VkCommandBufferLevel                         level;
2458
2459   struct anv_cmd_state                         state;
2460};
2461
2462VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
2463void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
2464void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
2465void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
2466void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
2467                                  struct anv_cmd_buffer *secondary);
2468void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
2469VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
2470                                struct anv_cmd_buffer *cmd_buffer,
2471                                const VkSemaphore *in_semaphores,
2472                                uint32_t num_in_semaphores,
2473                                const VkSemaphore *out_semaphores,
2474                                uint32_t num_out_semaphores,
2475                                VkFence fence);
2476
2477VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);
2478
2479struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
2480                                             const void *data, uint32_t size, uint32_t alignment);
2481struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
2482                                              uint32_t *a, uint32_t *b,
2483                                              uint32_t dwords, uint32_t alignment);
2484
2485struct anv_address
2486anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
2487struct anv_state
2488anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
2489                                   uint32_t entries, uint32_t *state_offset);
2490struct anv_state
2491anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
2492struct anv_state
2493anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
2494                                   uint32_t size, uint32_t alignment);
2495
2496VkResult
2497anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
2498
2499void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
2500void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
2501                                         bool depth_clamp_enable);
2502void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);
2503
2504void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
2505                                      struct anv_render_pass *pass,
2506                                      struct anv_framebuffer *framebuffer,
2507                                      const VkClearValue *clear_values);
2508
2509void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
2510
2511struct anv_state
2512anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
2513                              gl_shader_stage stage);
2514struct anv_state
2515anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);
2516
2517const struct anv_image_view *
2518anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);
2519
2520VkResult
2521anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
2522                                         uint32_t num_entries,
2523                                         uint32_t *state_offset,
2524                                         struct anv_state *bt_state);
2525
2526void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
2527
2528void anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer);
2529
2530enum anv_fence_type {
2531   ANV_FENCE_TYPE_NONE = 0,
2532   ANV_FENCE_TYPE_BO,
2533   ANV_FENCE_TYPE_SYNCOBJ,
2534   ANV_FENCE_TYPE_WSI,
2535};
2536
2537enum anv_bo_fence_state {
2538   /** Indicates that this is a new (or newly reset fence) */
2539   ANV_BO_FENCE_STATE_RESET,
2540
2541   /** Indicates that this fence has been submitted to the GPU but is still
2542    * (as far as we know) in use by the GPU.
2543    */
2544   ANV_BO_FENCE_STATE_SUBMITTED,
2545
2546   ANV_BO_FENCE_STATE_SIGNALED,
2547};
2548
2549struct anv_fence_impl {
2550   enum anv_fence_type type;
2551
2552   union {
2553      /** Fence implementation for BO fences
2554       *
2555       * These fences use a BO and a set of CPU-tracked state flags.  The BO
2556       * is added to the object list of the last execbuf call in a QueueSubmit
2557       * and is marked EXEC_WRITE.  The state flags track when the BO has been
2558       * submitted to the kernel.  We need to do this because Vulkan lets you
2559       * wait on a fence that has not yet been submitted and I915_GEM_BUSY
2560       * will say it's idle in this case.
2561       */
2562      struct {
2563         struct anv_bo bo;
2564         enum anv_bo_fence_state state;
2565      } bo;
2566
2567      /** DRM syncobj handle for syncobj-based fences */
2568      uint32_t syncobj;
2569
2570      /** WSI fence */
2571      struct wsi_fence *fence_wsi;
2572   };
2573};
2574
2575struct anv_fence {
2576   /* Permanent fence state.  Every fence has some form of permanent state
2577    * (type != ANV_SEMAPHORE_TYPE_NONE).  This may be a BO to fence on (for
2578    * cross-process fences) or it could just be a dummy for use internally.
2579    */
2580   struct anv_fence_impl permanent;
2581
2582   /* Temporary fence state.  A fence *may* have temporary state.  That state
2583    * is added to the fence by an import operation and is reset back to
2584    * ANV_SEMAPHORE_TYPE_NONE when the fence is reset.  A fence with temporary
2585    * state cannot be signaled because the fence must already be signaled
2586    * before the temporary state can be exported from the fence in the other
2587    * process and imported here.
2588    */
2589   struct anv_fence_impl temporary;
2590};
2591
2592struct anv_event {
2593   uint64_t                                     semaphore;
2594   struct anv_state                             state;
2595};
2596
2597enum anv_semaphore_type {
2598   ANV_SEMAPHORE_TYPE_NONE = 0,
2599   ANV_SEMAPHORE_TYPE_DUMMY,
2600   ANV_SEMAPHORE_TYPE_BO,
2601   ANV_SEMAPHORE_TYPE_SYNC_FILE,
2602   ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
2603};
2604
2605struct anv_semaphore_impl {
2606   enum anv_semaphore_type type;
2607
2608   union {
2609      /* A BO representing this semaphore when type == ANV_SEMAPHORE_TYPE_BO.
2610       * This BO will be added to the object list on any execbuf2 calls for
2611       * which this semaphore is used as a wait or signal fence.  When used as
2612       * a signal fence, the EXEC_OBJECT_WRITE flag will be set.
2613       */
2614      struct anv_bo *bo;
2615
2616      /* The sync file descriptor when type == ANV_SEMAPHORE_TYPE_SYNC_FILE.
2617       * If the semaphore is in the unsignaled state due to either just being
2618       * created or because it has been used for a wait, fd will be -1.
2619       */
2620      int fd;
2621
2622      /* Sync object handle when type == ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ.
2623       * Unlike GEM BOs, DRM sync objects aren't deduplicated by the kernel on
2624       * import so we don't need to bother with a userspace cache.
2625       */
2626      uint32_t syncobj;
2627   };
2628};
2629
2630struct anv_semaphore {
2631   /* Permanent semaphore state.  Every semaphore has some form of permanent
2632    * state (type != ANV_SEMAPHORE_TYPE_NONE).  This may be a BO to fence on
2633    * (for cross-process semaphores0 or it could just be a dummy for use
2634    * internally.
2635    */
2636   struct anv_semaphore_impl permanent;
2637
2638   /* Temporary semaphore state.  A semaphore *may* have temporary state.
2639    * That state is added to the semaphore by an import operation and is reset
2640    * back to ANV_SEMAPHORE_TYPE_NONE when the semaphore is waited on.  A
2641    * semaphore with temporary state cannot be signaled because the semaphore
2642    * must already be signaled before the temporary state can be exported from
2643    * the semaphore in the other process and imported here.
2644    */
2645   struct anv_semaphore_impl temporary;
2646};
2647
2648void anv_semaphore_reset_temporary(struct anv_device *device,
2649                                   struct anv_semaphore *semaphore);
2650
2651struct anv_shader_module {
2652   unsigned char                                sha1[20];
2653   uint32_t                                     size;
2654   char                                         data[0];
2655};
2656
2657static inline gl_shader_stage
2658vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
2659{
2660   assert(__builtin_popcount(vk_stage) == 1);
2661   return ffs(vk_stage) - 1;
2662}
2663
2664static inline VkShaderStageFlagBits
2665mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
2666{
2667   return (1 << mesa_stage);
2668}
2669
2670#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
2671
2672#define anv_foreach_stage(stage, stage_bits)                         \
2673   for (gl_shader_stage stage,                                       \
2674        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK);    \
2675        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
2676        __tmp &= ~(1 << (stage)))
2677
2678struct anv_pipeline_bind_map {
2679   uint32_t surface_count;
2680   uint32_t sampler_count;
2681
2682   struct anv_pipeline_binding *                surface_to_descriptor;
2683   struct anv_pipeline_binding *                sampler_to_descriptor;
2684};
2685
2686struct anv_shader_bin_key {
2687   uint32_t size;
2688   uint8_t data[0];
2689};
2690
2691struct anv_shader_bin {
2692   uint32_t ref_cnt;
2693
2694   const struct anv_shader_bin_key *key;
2695
2696   struct anv_state kernel;
2697   uint32_t kernel_size;
2698
2699   struct anv_state constant_data;
2700   uint32_t constant_data_size;
2701
2702   const struct brw_stage_prog_data *prog_data;
2703   uint32_t prog_data_size;
2704
2705   struct nir_xfb_info *xfb_info;
2706
2707   struct anv_pipeline_bind_map bind_map;
2708};
2709
2710struct anv_shader_bin *
2711anv_shader_bin_create(struct anv_device *device,
2712                      const void *key, uint32_t key_size,
2713                      const void *kernel, uint32_t kernel_size,
2714                      const void *constant_data, uint32_t constant_data_size,
2715                      const struct brw_stage_prog_data *prog_data,
2716                      uint32_t prog_data_size, const void *prog_data_param,
2717                      const struct nir_xfb_info *xfb_info,
2718                      const struct anv_pipeline_bind_map *bind_map);
2719
2720void
2721anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
2722
2723static inline void
2724anv_shader_bin_ref(struct anv_shader_bin *shader)
2725{
2726   assert(shader && shader->ref_cnt >= 1);
2727   p_atomic_inc(&shader->ref_cnt);
2728}
2729
2730static inline void
2731anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
2732{
2733   assert(shader && shader->ref_cnt >= 1);
2734   if (p_atomic_dec_zero(&shader->ref_cnt))
2735      anv_shader_bin_destroy(device, shader);
2736}
2737
2738struct anv_pipeline {
2739   struct anv_device *                          device;
2740   struct anv_batch                             batch;
2741   uint32_t                                     batch_data[512];
2742   struct anv_reloc_list                        batch_relocs;
2743   uint32_t                                     dynamic_state_mask;
2744   struct anv_dynamic_state                     dynamic_state;
2745
2746   struct anv_subpass *                         subpass;
2747
2748   bool                                         needs_data_cache;
2749
2750   struct anv_shader_bin *                      shaders[MESA_SHADER_STAGES];
2751
2752   struct {
2753      const struct gen_l3_config *              l3_config;
2754      uint32_t                                  total_size;
2755   } urb;
2756
2757   VkShaderStageFlags                           active_stages;
2758   struct anv_state                             blend_state;
2759
2760   uint32_t                                     vb_used;
2761   struct anv_pipeline_vertex_binding {
2762      uint32_t                                  stride;
2763      bool                                      instanced;
2764      uint32_t                                  instance_divisor;
2765   } vb[MAX_VBS];
2766
2767   uint8_t                                      xfb_used;
2768
2769   bool                                         primitive_restart;
2770   uint32_t                                     topology;
2771
2772   uint32_t                                     cs_right_mask;
2773
2774   bool                                         writes_depth;
2775   bool                                         depth_test_enable;
2776   bool                                         writes_stencil;
2777   bool                                         stencil_test_enable;
2778   bool                                         depth_clamp_enable;
2779   bool                                         depth_clip_enable;
2780   bool                                         sample_shading_enable;
2781   bool                                         kill_pixel;
2782
2783   struct {
2784      uint32_t                                  sf[7];
2785      uint32_t                                  depth_stencil_state[3];
2786   } gen7;
2787
2788   struct {
2789      uint32_t                                  sf[4];
2790      uint32_t                                  raster[5];
2791      uint32_t                                  wm_depth_stencil[3];
2792   } gen8;
2793
2794   struct {
2795      uint32_t                                  wm_depth_stencil[4];
2796   } gen9;
2797
2798   uint32_t                                     interface_descriptor_data[8];
2799};
2800
2801static inline bool
2802anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
2803                       gl_shader_stage stage)
2804{
2805   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
2806}
2807
2808#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)                   \
2809static inline const struct brw_##prefix##_prog_data *                \
2810get_##prefix##_prog_data(const struct anv_pipeline *pipeline)        \
2811{                                                                    \
2812   if (anv_pipeline_has_stage(pipeline, stage)) {                    \
2813      return (const struct brw_##prefix##_prog_data *)               \
2814             pipeline->shaders[stage]->prog_data;                    \
2815   } else {                                                          \
2816      return NULL;                                                   \
2817   }                                                                 \
2818}
2819
2820ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
2821ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
2822ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
2823ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
2824ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
2825ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
2826
2827static inline const struct brw_vue_prog_data *
2828anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
2829{
2830   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
2831      return &get_gs_prog_data(pipeline)->base;
2832   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
2833      return &get_tes_prog_data(pipeline)->base;
2834   else
2835      return &get_vs_prog_data(pipeline)->base;
2836}
2837
2838VkResult
2839anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
2840                  struct anv_pipeline_cache *cache,
2841                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
2842                  const VkAllocationCallbacks *alloc);
2843
2844VkResult
2845anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
2846                        struct anv_pipeline_cache *cache,
2847                        const VkComputePipelineCreateInfo *info,
2848                        const struct anv_shader_module *module,
2849                        const char *entrypoint,
2850                        const VkSpecializationInfo *spec_info);
2851
2852struct anv_format_plane {
2853   enum isl_format isl_format:16;
2854   struct isl_swizzle swizzle;
2855
2856   /* Whether this plane contains chroma channels */
2857   bool has_chroma;
2858
2859   /* For downscaling of YUV planes */
2860   uint8_t denominator_scales[2];
2861
2862   /* How to map sampled ycbcr planes to a single 4 component element. */
2863   struct isl_swizzle ycbcr_swizzle;
2864
2865   /* What aspect is associated to this plane */
2866   VkImageAspectFlags aspect;
2867};
2868
2869
2870struct anv_format {
2871   struct anv_format_plane planes[3];
2872   VkFormat vk_format;
2873   uint8_t n_planes;
2874   bool can_ycbcr;
2875};
2876
2877static inline uint32_t
2878anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
2879                          VkImageAspectFlags aspect_mask)
2880{
2881   switch (aspect_mask) {
2882   case VK_IMAGE_ASPECT_COLOR_BIT:
2883   case VK_IMAGE_ASPECT_DEPTH_BIT:
2884   case VK_IMAGE_ASPECT_PLANE_0_BIT:
2885      return 0;
2886   case VK_IMAGE_ASPECT_STENCIL_BIT:
2887      if ((image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) == 0)
2888         return 0;
2889      /* Fall-through */
2890   case VK_IMAGE_ASPECT_PLANE_1_BIT:
2891      return 1;
2892   case VK_IMAGE_ASPECT_PLANE_2_BIT:
2893      return 2;
2894   default:
2895      /* Purposefully assert with depth/stencil aspects. */
2896      unreachable("invalid image aspect");
2897   }
2898}
2899
2900static inline VkImageAspectFlags
2901anv_plane_to_aspect(VkImageAspectFlags image_aspects,
2902                    uint32_t plane)
2903{
2904   if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
2905      if (util_bitcount(image_aspects) > 1)
2906         return VK_IMAGE_ASPECT_PLANE_0_BIT << plane;
2907      return VK_IMAGE_ASPECT_COLOR_BIT;
2908   }
2909   if (image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
2910      return VK_IMAGE_ASPECT_DEPTH_BIT << plane;
2911   assert(image_aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
2912   return VK_IMAGE_ASPECT_STENCIL_BIT;
2913}
2914
2915#define anv_foreach_image_aspect_bit(b, image, aspects) \
2916   for_each_bit(b, anv_image_expand_aspects(image, aspects))
2917
2918const struct anv_format *
2919anv_get_format(VkFormat format);
2920
2921static inline uint32_t
2922anv_get_format_planes(VkFormat vk_format)
2923{
2924   const struct anv_format *format = anv_get_format(vk_format);
2925
2926   return format != NULL ? format->n_planes : 0;
2927}
2928
2929struct anv_format_plane
2930anv_get_format_plane(const struct gen_device_info *devinfo, VkFormat vk_format,
2931                     VkImageAspectFlagBits aspect, VkImageTiling tiling);
2932
2933static inline enum isl_format
2934anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
2935                   VkImageAspectFlags aspect, VkImageTiling tiling)
2936{
2937   return anv_get_format_plane(devinfo, vk_format, aspect, tiling).isl_format;
2938}
2939
2940static inline struct isl_swizzle
2941anv_swizzle_for_render(struct isl_swizzle swizzle)
2942{
2943   /* Sometimes the swizzle will have alpha map to one.  We do this to fake
2944    * RGB as RGBA for texturing
2945    */
2946   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
2947          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);
2948
2949   /* But it doesn't matter what we render to that channel */
2950   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;
2951
2952   return swizzle;
2953}
2954
2955void
2956anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
2957
2958/**
2959 * Subsurface of an anv_image.
2960 */
2961struct anv_surface {
2962   /** Valid only if isl_surf::size_B > 0. */
2963   struct isl_surf isl;
2964
2965   /**
2966    * Offset from VkImage's base address, as bound by vkBindImageMemory().
2967    */
2968   uint32_t offset;
2969};
2970
2971struct anv_image {
2972   VkImageType type; /**< VkImageCreateInfo::imageType */
2973   /* The original VkFormat provided by the client.  This may not match any
2974    * of the actual surface formats.
2975    */
2976   VkFormat vk_format;
2977   const struct anv_format *format;
2978
2979   VkImageAspectFlags aspects;
2980   VkExtent3D extent;
2981   uint32_t levels;
2982   uint32_t array_size;
2983   uint32_t samples; /**< VkImageCreateInfo::samples */
2984   uint32_t n_planes;
2985   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
2986   VkImageCreateFlags create_flags; /* Flags used when creating image. */
2987   VkImageTiling tiling; /** VkImageCreateInfo::tiling */
2988
2989   /** True if this is needs to be bound to an appropriately tiled BO.
2990    *
2991    * When not using modifiers, consumers such as X11, Wayland, and KMS need
2992    * the tiling passed via I915_GEM_SET_TILING.  When exporting these buffers
2993    * we require a dedicated allocation so that we can know to allocate a
2994    * tiled buffer.
2995    */
2996   bool needs_set_tiling;
2997
2998   /**
2999    * Must be DRM_FORMAT_MOD_INVALID unless tiling is
3000    * VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT.
3001    */
3002   uint64_t drm_format_mod;
3003
3004   VkDeviceSize size;
3005   uint32_t alignment;
3006
3007   /* Whether the image is made of several underlying buffer objects rather a
3008    * single one with different offsets.
3009    */
3010   bool disjoint;
3011
3012   /* All the formats that can be used when creating views of this image
3013    * are CCS_E compatible.
3014    */
3015   bool ccs_e_compatible;
3016
3017   /* Image was created with external format. */
3018   bool external_format;
3019
3020   /**
3021    * Image subsurfaces
3022    *
3023    * For each foo, anv_image::planes[x].surface is valid if and only if
3024    * anv_image::aspects has a x aspect. Refer to anv_image_aspect_to_plane()
3025    * to figure the number associated with a given aspect.
3026    *
3027    * The hardware requires that the depth buffer and stencil buffer be
3028    * separate surfaces.  From Vulkan's perspective, though, depth and stencil
3029    * reside in the same VkImage.  To satisfy both the hardware and Vulkan, we
3030    * allocate the depth and stencil buffers as separate surfaces in the same
3031    * bo.
3032    *
3033    * Memory layout :
3034    *
3035    * -----------------------
3036    * |     surface0        |   /|\
3037    * -----------------------    |
3038    * |   shadow surface0   |    |
3039    * -----------------------    | Plane 0
3040    * |    aux surface0     |    |
3041    * -----------------------    |
3042    * | fast clear colors0  |   \|/
3043    * -----------------------
3044    * |     surface1        |   /|\
3045    * -----------------------    |
3046    * |   shadow surface1   |    |
3047    * -----------------------    | Plane 1
3048    * |    aux surface1     |    |
3049    * -----------------------    |
3050    * | fast clear colors1  |   \|/
3051    * -----------------------
3052    * |        ...          |
3053    * |                     |
3054    * -----------------------
3055    */
3056   struct {
3057      /**
3058       * Offset of the entire plane (whenever the image is disjoint this is
3059       * set to 0).
3060       */
3061      uint32_t offset;
3062
3063      VkDeviceSize size;
3064      uint32_t alignment;
3065
3066      struct anv_surface surface;
3067
3068      /**
3069       * A surface which shadows the main surface and may have different
3070       * tiling. This is used for sampling using a tiling that isn't supported
3071       * for other operations.
3072       */
3073      struct anv_surface shadow_surface;
3074
3075      /**
3076       * For color images, this is the aux usage for this image when not used
3077       * as a color attachment.
3078       *
3079       * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the
3080       * image has a HiZ buffer.
3081       */
3082      enum isl_aux_usage aux_usage;
3083
3084      struct anv_surface aux_surface;
3085
3086      /**
3087       * Offset of the fast clear state (used to compute the
3088       * fast_clear_state_offset of the following planes).
3089       */
3090      uint32_t fast_clear_state_offset;
3091
3092      /**
3093       * BO associated with this plane, set when bound.
3094       */
3095      struct anv_address address;
3096
3097      /**
3098       * When destroying the image, also free the bo.
3099       * */
3100      bool bo_is_owned;
3101   } planes[3];
3102};
3103
3104/* The ordering of this enum is important */
3105enum anv_fast_clear_type {
3106   /** Image does not have/support any fast-clear blocks */
3107   ANV_FAST_CLEAR_NONE = 0,
3108   /** Image has/supports fast-clear but only to the default value */
3109   ANV_FAST_CLEAR_DEFAULT_VALUE = 1,
3110   /** Image has/supports fast-clear with an arbitrary fast-clear value */
3111   ANV_FAST_CLEAR_ANY = 2,
3112};
3113
3114/* Returns the number of auxiliary buffer levels attached to an image. */
3115static inline uint8_t
3116anv_image_aux_levels(const struct anv_image * const image,
3117                     VkImageAspectFlagBits aspect)
3118{
3119   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
3120   return image->planes[plane].aux_surface.isl.size_B > 0 ?
3121          image->planes[plane].aux_surface.isl.levels : 0;
3122}
3123
3124/* Returns the number of auxiliary buffer layers attached to an image. */
3125static inline uint32_t
3126anv_image_aux_layers(const struct anv_image * const image,
3127                     VkImageAspectFlagBits aspect,
3128                     const uint8_t miplevel)
3129{
3130   assert(image);
3131
3132   /* The miplevel must exist in the main buffer. */
3133   assert(miplevel < image->levels);
3134
3135   if (miplevel >= anv_image_aux_levels(image, aspect)) {
3136      /* There are no layers with auxiliary data because the miplevel has no
3137       * auxiliary data.
3138       */
3139      return 0;
3140   } else {
3141      uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
3142      return MAX2(image->planes[plane].aux_surface.isl.logical_level0_px.array_len,
3143                  image->planes[plane].aux_surface.isl.logical_level0_px.depth >> miplevel);
3144   }
3145}
3146
3147static inline struct anv_address
3148anv_image_get_clear_color_addr(const struct anv_device *device,
3149                               const struct anv_image *image,
3150                               VkImageAspectFlagBits aspect)
3151{
3152   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
3153
3154   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
3155   return anv_address_add(image->planes[plane].address,
3156                          image->planes[plane].fast_clear_state_offset);
3157}
3158
3159static inline struct anv_address
3160anv_image_get_fast_clear_type_addr(const struct anv_device *device,
3161                                   const struct anv_image *image,
3162                                   VkImageAspectFlagBits aspect)
3163{
3164   struct anv_address addr =
3165      anv_image_get_clear_color_addr(device, image, aspect);
3166
3167   const unsigned clear_color_state_size = device->info.gen >= 10 ?
3168      device->isl_dev.ss.clear_color_state_size :
3169      device->isl_dev.ss.clear_value_size;
3170   return anv_address_add(addr, clear_color_state_size);
3171}
3172
3173static inline struct anv_address
3174anv_image_get_compression_state_addr(const struct anv_device *device,
3175                                     const struct anv_image *image,
3176                                     VkImageAspectFlagBits aspect,
3177                                     uint32_t level, uint32_t array_layer)
3178{
3179   assert(level < anv_image_aux_levels(image, aspect));
3180   assert(array_layer < anv_image_aux_layers(image, aspect, level));
3181   UNUSED uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
3182   assert(image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E);
3183
3184   struct anv_address addr =
3185      anv_image_get_fast_clear_type_addr(device, image, aspect);
3186   addr.offset += 4; /* Go past the fast clear type */
3187
3188   if (image->type == VK_IMAGE_TYPE_3D) {
3189      for (uint32_t l = 0; l < level; l++)
3190         addr.offset += anv_minify(image->extent.depth, l) * 4;
3191   } else {
3192      addr.offset += level * image->array_size * 4;
3193   }
3194   addr.offset += array_layer * 4;
3195
3196   return addr;
3197}
3198
3199/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
3200static inline bool
3201anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
3202                        const struct anv_image *image)
3203{
3204   if (!(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
3205      return false;
3206
3207   /* Allow this feature on BDW even though it is disabled in the BDW devinfo
3208    * struct. There's documentation which suggests that this feature actually
3209    * reduces performance on BDW, but it has only been observed to help so
3210    * far. Sampling fast-cleared blocks on BDW must also be handled with care
3211    * (see depth_stencil_attachment_compute_aux_usage() for more info).
3212    */
3213   if (devinfo->gen != 8 && !devinfo->has_sample_with_hiz)
3214      return false;
3215
3216   return image->samples == 1;
3217}
3218
3219void
3220anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
3221                                  const struct anv_image *image,
3222                                  VkImageAspectFlagBits aspect,
3223                                  enum isl_aux_usage aux_usage,
3224                                  uint32_t level,
3225                                  uint32_t base_layer,
3226                                  uint32_t layer_count);
3227
3228void
3229anv_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
3230                      const struct anv_image *image,
3231                      VkImageAspectFlagBits aspect,
3232                      enum isl_aux_usage aux_usage,
3233                      enum isl_format format, struct isl_swizzle swizzle,
3234                      uint32_t level, uint32_t base_layer, uint32_t layer_count,
3235                      VkRect2D area, union isl_color_value clear_color);
3236void
3237anv_image_clear_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
3238                              const struct anv_image *image,
3239                              VkImageAspectFlags aspects,
3240                              enum isl_aux_usage depth_aux_usage,
3241                              uint32_t level,
3242                              uint32_t base_layer, uint32_t layer_count,
3243                              VkRect2D area,
3244                              float depth_value, uint8_t stencil_value);
3245void
3246anv_image_msaa_resolve(struct anv_cmd_buffer *cmd_buffer,
3247                       const struct anv_image *src_image,
3248                       enum isl_aux_usage src_aux_usage,
3249                       uint32_t src_level, uint32_t src_base_layer,
3250                       const struct anv_image *dst_image,
3251                       enum isl_aux_usage dst_aux_usage,
3252                       uint32_t dst_level, uint32_t dst_base_layer,
3253                       VkImageAspectFlagBits aspect,
3254                       uint32_t src_x, uint32_t src_y,
3255                       uint32_t dst_x, uint32_t dst_y,
3256                       uint32_t width, uint32_t height,
3257                       uint32_t layer_count,
3258                       enum blorp_filter filter);
3259void
3260anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
3261                 const struct anv_image *image,
3262                 VkImageAspectFlagBits aspect, uint32_t level,
3263                 uint32_t base_layer, uint32_t layer_count,
3264                 enum isl_aux_op hiz_op);
3265void
3266anv_image_hiz_clear(struct anv_cmd_buffer *cmd_buffer,
3267                    const struct anv_image *image,
3268                    VkImageAspectFlags aspects,
3269                    uint32_t level,
3270                    uint32_t base_layer, uint32_t layer_count,
3271                    VkRect2D area, uint8_t stencil_value);
3272void
3273anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
3274                 const struct anv_image *image,
3275                 enum isl_format format,
3276                 VkImageAspectFlagBits aspect,
3277                 uint32_t base_layer, uint32_t layer_count,
3278                 enum isl_aux_op mcs_op, union isl_color_value *clear_value,
3279                 bool predicate);
3280void
3281anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
3282                 const struct anv_image *image,
3283                 enum isl_format format,
3284                 VkImageAspectFlagBits aspect, uint32_t level,
3285                 uint32_t base_layer, uint32_t layer_count,
3286                 enum isl_aux_op ccs_op, union isl_color_value *clear_value,
3287                 bool predicate);
3288
3289void
3290anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
3291                         const struct anv_image *image,
3292                         uint32_t base_level, uint32_t level_count,
3293                         uint32_t base_layer, uint32_t layer_count);
3294
3295enum isl_aux_usage
3296anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
3297                        const struct anv_image *image,
3298                        const VkImageAspectFlagBits aspect,
3299                        const VkImageLayout layout);
3300
3301enum anv_fast_clear_type
3302anv_layout_to_fast_clear_type(const struct gen_device_info * const devinfo,
3303                              const struct anv_image * const image,
3304                              const VkImageAspectFlagBits aspect,
3305                              const VkImageLayout layout);
3306
3307/* This is defined as a macro so that it works for both
3308 * VkImageSubresourceRange and VkImageSubresourceLayers
3309 */
3310#define anv_get_layerCount(_image, _range) \
3311   ((_range)->layerCount == VK_REMAINING_ARRAY_LAYERS ? \
3312    (_image)->array_size - (_range)->baseArrayLayer : (_range)->layerCount)
3313
3314static inline uint32_t
3315anv_get_levelCount(const struct anv_image *image,
3316                   const VkImageSubresourceRange *range)
3317{
3318   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
3319          image->levels - range->baseMipLevel : range->levelCount;
3320}
3321
3322static inline VkImageAspectFlags
3323anv_image_expand_aspects(const struct anv_image *image,
3324                         VkImageAspectFlags aspects)
3325{
3326   /* If the underlying image has color plane aspects and
3327    * VK_IMAGE_ASPECT_COLOR_BIT has been requested, then return the aspects of
3328    * the underlying image. */
3329   if ((image->aspects & VK_IMAGE_ASPECT_PLANES_BITS_ANV) != 0 &&
3330       aspects == VK_IMAGE_ASPECT_COLOR_BIT)
3331      return image->aspects;
3332
3333   return aspects;
3334}
3335
3336static inline bool
3337anv_image_aspects_compatible(VkImageAspectFlags aspects1,
3338                             VkImageAspectFlags aspects2)
3339{
3340   if (aspects1 == aspects2)
3341      return true;
3342
3343   /* Only 1 color aspects are compatibles. */
3344   if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
3345       (aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
3346       util_bitcount(aspects1) == util_bitcount(aspects2))
3347      return true;
3348
3349   return false;
3350}
3351
3352struct anv_image_view {
3353   const struct anv_image *image; /**< VkImageViewCreateInfo::image */
3354
3355   VkImageAspectFlags aspect_mask;
3356   VkFormat vk_format;
3357   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
3358
3359   unsigned n_planes;
3360   struct {
3361      uint32_t image_plane;
3362
3363      struct isl_view isl;
3364
3365      /**
3366       * RENDER_SURFACE_STATE when using image as a sampler surface with an
3367       * image layout of SHADER_READ_ONLY_OPTIMAL or
3368       * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
3369       */
3370      struct anv_surface_state optimal_sampler_surface_state;
3371
3372      /**
3373       * RENDER_SURFACE_STATE when using image as a sampler surface with an
3374       * image layout of GENERAL.
3375       */
3376      struct anv_surface_state general_sampler_surface_state;
3377
3378      /**
3379       * RENDER_SURFACE_STATE when using image as a storage image. Separate
3380       * states for write-only and readable, using the real format for
3381       * write-only and the lowered format for readable.
3382       */
3383      struct anv_surface_state storage_surface_state;
3384      struct anv_surface_state writeonly_storage_surface_state;
3385
3386      struct brw_image_param storage_image_param;
3387   } planes[3];
3388};
3389
3390enum anv_image_view_state_flags {
3391   ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY   = (1 << 0),
3392   ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL      = (1 << 1),
3393};
3394
3395void anv_image_fill_surface_state(struct anv_device *device,
3396                                  const struct anv_image *image,
3397                                  VkImageAspectFlagBits aspect,
3398                                  const struct isl_view *view,
3399                                  isl_surf_usage_flags_t view_usage,
3400                                  enum isl_aux_usage aux_usage,
3401                                  const union isl_color_value *clear_color,
3402                                  enum anv_image_view_state_flags flags,
3403                                  struct anv_surface_state *state_inout,
3404                                  struct brw_image_param *image_param_out);
3405
3406struct anv_image_create_info {
3407   const VkImageCreateInfo *vk_info;
3408
3409   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
3410   isl_tiling_flags_t isl_tiling_flags;
3411
3412   /** These flags will be added to any derived from VkImageCreateInfo. */
3413   isl_surf_usage_flags_t isl_extra_usage_flags;
3414
3415   uint32_t stride;
3416   bool external_format;
3417};
3418
3419VkResult anv_image_create(VkDevice _device,
3420                          const struct anv_image_create_info *info,
3421                          const VkAllocationCallbacks* alloc,
3422                          VkImage *pImage);
3423
3424const struct anv_surface *
3425anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
3426                                      VkImageAspectFlags aspect_mask);
3427
3428enum isl_format
3429anv_isl_format_for_descriptor_type(VkDescriptorType type);
3430
3431static inline struct VkExtent3D
3432anv_sanitize_image_extent(const VkImageType imageType,
3433                          const struct VkExtent3D imageExtent)
3434{
3435   switch (imageType) {
3436   case VK_IMAGE_TYPE_1D:
3437      return (VkExtent3D) { imageExtent.width, 1, 1 };
3438   case VK_IMAGE_TYPE_2D:
3439      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
3440   case VK_IMAGE_TYPE_3D:
3441      return imageExtent;
3442   default:
3443      unreachable("invalid image type");
3444   }
3445}
3446
3447static inline struct VkOffset3D
3448anv_sanitize_image_offset(const VkImageType imageType,
3449                          const struct VkOffset3D imageOffset)
3450{
3451   switch (imageType) {
3452   case VK_IMAGE_TYPE_1D:
3453      return (VkOffset3D) { imageOffset.x, 0, 0 };
3454   case VK_IMAGE_TYPE_2D:
3455      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
3456   case VK_IMAGE_TYPE_3D:
3457      return imageOffset;
3458   default:
3459      unreachable("invalid image type");
3460   }
3461}
3462
3463VkFormatFeatureFlags
3464anv_get_image_format_features(const struct gen_device_info *devinfo,
3465                              VkFormat vk_format,
3466                              const struct anv_format *anv_format,
3467                              VkImageTiling vk_tiling);
3468
3469void anv_fill_buffer_surface_state(struct anv_device *device,
3470                                   struct anv_state state,
3471                                   enum isl_format format,
3472                                   struct anv_address address,
3473                                   uint32_t range, uint32_t stride);
3474
3475static inline void
3476anv_clear_color_from_att_state(union isl_color_value *clear_color,
3477                               const struct anv_attachment_state *att_state,
3478                               const struct anv_image_view *iview)
3479{
3480   const struct isl_format_layout *view_fmtl =
3481      isl_format_get_layout(iview->planes[0].isl.format);
3482
3483#define COPY_CLEAR_COLOR_CHANNEL(c, i) \
3484   if (view_fmtl->channels.c.bits) \
3485      clear_color->u32[i] = att_state->clear_value.color.uint32[i]
3486
3487   COPY_CLEAR_COLOR_CHANNEL(r, 0);
3488   COPY_CLEAR_COLOR_CHANNEL(g, 1);
3489   COPY_CLEAR_COLOR_CHANNEL(b, 2);
3490   COPY_CLEAR_COLOR_CHANNEL(a, 3);
3491
3492#undef COPY_CLEAR_COLOR_CHANNEL
3493}
3494
3495
3496struct anv_ycbcr_conversion {
3497   const struct anv_format *        format;
3498   VkSamplerYcbcrModelConversion    ycbcr_model;
3499   VkSamplerYcbcrRange              ycbcr_range;
3500   VkComponentSwizzle               mapping[4];
3501   VkChromaLocation                 chroma_offsets[2];
3502   VkFilter                         chroma_filter;
3503   bool                             chroma_reconstruction;
3504};
3505
3506struct anv_sampler {
3507   uint32_t                     state[3][4];
3508   uint32_t                     n_planes;
3509   struct anv_ycbcr_conversion *conversion;
3510
3511   /* Blob of sampler state data which is guaranteed to be 32-byte aligned
3512    * and with a 32-byte stride for use as bindless samplers.
3513    */
3514   struct anv_state             bindless_state;
3515};
3516
3517struct anv_framebuffer {
3518   uint32_t                                     width;
3519   uint32_t                                     height;
3520   uint32_t                                     layers;
3521
3522   uint32_t                                     attachment_count;
3523   struct anv_image_view *                      attachments[0];
3524};
3525
3526struct anv_subpass_attachment {
3527   VkImageUsageFlagBits usage;
3528   uint32_t attachment;
3529   VkImageLayout layout;
3530};
3531
3532struct anv_subpass {
3533   uint32_t                                     attachment_count;
3534
3535   /**
3536    * A pointer to all attachment references used in this subpass.
3537    * Only valid if ::attachment_count > 0.
3538    */
3539   struct anv_subpass_attachment *              attachments;
3540   uint32_t                                     input_count;
3541   struct anv_subpass_attachment *              input_attachments;
3542   uint32_t                                     color_count;
3543   struct anv_subpass_attachment *              color_attachments;
3544   struct anv_subpass_attachment *              resolve_attachments;
3545
3546   struct anv_subpass_attachment *              depth_stencil_attachment;
3547   struct anv_subpass_attachment *              ds_resolve_attachment;
3548   VkResolveModeFlagBitsKHR                     depth_resolve_mode;
3549   VkResolveModeFlagBitsKHR                     stencil_resolve_mode;
3550
3551   uint32_t                                     view_mask;
3552
3553   /** Subpass has a depth/stencil self-dependency */
3554   bool                                         has_ds_self_dep;
3555
3556   /** Subpass has at least one color resolve attachment */
3557   bool                                         has_color_resolve;
3558};
3559
3560static inline unsigned
3561anv_subpass_view_count(const struct anv_subpass *subpass)
3562{
3563   return MAX2(1, util_bitcount(subpass->view_mask));
3564}
3565
3566struct anv_render_pass_attachment {
3567   /* TODO: Consider using VkAttachmentDescription instead of storing each of
3568    * its members individually.
3569    */
3570   VkFormat                                     format;
3571   uint32_t                                     samples;
3572   VkImageUsageFlags                            usage;
3573   VkAttachmentLoadOp                           load_op;
3574   VkAttachmentStoreOp                          store_op;
3575   VkAttachmentLoadOp                           stencil_load_op;
3576   VkImageLayout                                initial_layout;
3577   VkImageLayout                                final_layout;
3578   VkImageLayout                                first_subpass_layout;
3579
3580   /* The subpass id in which the attachment will be used last. */
3581   uint32_t                                     last_subpass_idx;
3582};
3583
3584struct anv_render_pass {
3585   uint32_t                                     attachment_count;
3586   uint32_t                                     subpass_count;
3587   /* An array of subpass_count+1 flushes, one per subpass boundary */
3588   enum anv_pipe_bits *                         subpass_flushes;
3589   struct anv_render_pass_attachment *          attachments;
3590   struct anv_subpass                           subpasses[0];
3591};
3592
3593#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff
3594
3595struct anv_query_pool {
3596   VkQueryType                                  type;
3597   VkQueryPipelineStatisticFlags                pipeline_statistics;
3598   /** Stride between slots, in bytes */
3599   uint32_t                                     stride;
3600   /** Number of slots in this query pool */
3601   uint32_t                                     slots;
3602   struct anv_bo                                bo;
3603};
3604
3605int anv_get_instance_entrypoint_index(const char *name);
3606int anv_get_device_entrypoint_index(const char *name);
3607
3608bool
3609anv_instance_entrypoint_is_enabled(int index, uint32_t core_version,
3610                                   const struct anv_instance_extension_table *instance);
3611
3612bool
3613anv_device_entrypoint_is_enabled(int index, uint32_t core_version,
3614                                 const struct anv_instance_extension_table *instance,
3615                                 const struct anv_device_extension_table *device);
3616
3617void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
3618                            const char *name);
3619
3620void anv_dump_image_to_ppm(struct anv_device *device,
3621                           struct anv_image *image, unsigned miplevel,
3622                           unsigned array_layer, VkImageAspectFlagBits aspect,
3623                           const char *filename);
3624
3625enum anv_dump_action {
3626   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
3627};
3628
3629void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
3630void anv_dump_finish(void);
3631
3632void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
3633                              struct anv_framebuffer *fb);
3634
3635static inline uint32_t
3636anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
3637{
3638   /* This function must be called from within a subpass. */
3639   assert(cmd_state->pass && cmd_state->subpass);
3640
3641   const uint32_t subpass_id = cmd_state->subpass - cmd_state->pass->subpasses;
3642
3643   /* The id of this subpass shouldn't exceed the number of subpasses in this
3644    * render pass minus 1.
3645    */
3646   assert(subpass_id < cmd_state->pass->subpass_count);
3647   return subpass_id;
3648}
3649
3650#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)                      \
3651                                                                           \
3652   static inline struct __anv_type *                                       \
3653   __anv_type ## _from_handle(__VkType _handle)                            \
3654   {                                                                       \
3655      return (struct __anv_type *) _handle;                                \
3656   }                                                                       \
3657                                                                           \
3658   static inline __VkType                                                  \
3659   __anv_type ## _to_handle(struct __anv_type *_obj)                       \
3660   {                                                                       \
3661      return (__VkType) _obj;                                              \
3662   }
3663
3664#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)              \
3665                                                                           \
3666   static inline struct __anv_type *                                       \
3667   __anv_type ## _from_handle(__VkType _handle)                            \
3668   {                                                                       \
3669      return (struct __anv_type *)(uintptr_t) _handle;                     \
3670   }                                                                       \
3671                                                                           \
3672   static inline __VkType                                                  \
3673   __anv_type ## _to_handle(struct __anv_type *_obj)                       \
3674   {                                                                       \
3675      return (__VkType)(uintptr_t) _obj;                                   \
3676   }
3677
3678#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
3679   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
3680
3681ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
3682ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
3683ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
3684ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
3685ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)
3686
3687ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
3688ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
3689ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
3690ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
3691ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
3692ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
3693ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplate)
3694ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
3695ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
3696ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
3697ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
3698ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
3699ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView);
3700ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
3701ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
3702ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
3703ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
3704ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
3705ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
3706ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, VkSemaphore)
3707ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
3708ANV_DEFINE_NONDISP_HANDLE_CASTS(vk_debug_report_callback, VkDebugReportCallbackEXT)
3709ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, VkSamplerYcbcrConversion)
3710
3711/* Gen-specific function declarations */
3712#ifdef genX
3713#  include "anv_genX.h"
3714#else
3715#  define genX(x) gen7_##x
3716#  include "anv_genX.h"
3717#  undef genX
3718#  define genX(x) gen75_##x
3719#  include "anv_genX.h"
3720#  undef genX
3721#  define genX(x) gen8_##x
3722#  include "anv_genX.h"
3723#  undef genX
3724#  define genX(x) gen9_##x
3725#  include "anv_genX.h"
3726#  undef genX
3727#  define genX(x) gen10_##x
3728#  include "anv_genX.h"
3729#  undef genX
3730#  define genX(x) gen11_##x
3731#  include "anv_genX.h"
3732#  undef genX
3733#endif
3734
3735#endif /* ANV_PRIVATE_H */
3736