/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef TU_PRIVATE_H
#define TU_PRIVATE_H

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_VALGRIND
#include <memcheck.h>
#include <valgrind.h>
#define VG(x) x
#else
#define VG(x)
#endif

#include "c11/threads.h"
#include "compiler/shader_enums.h"
#include "main/macros.h"
#include "util/list.h"
#include "util/macros.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"
#include "wsi_common.h"

#include "drm/msm_drm.h"
#include "ir3/ir3_compiler.h"
#include "ir3/ir3_shader.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
#include "a6xx.xml.h"

#include "tu_descriptor_set.h"
#include "tu_extensions.h"

/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

#include <vulkan/vk_android_native_buffer.h>
#include <vulkan/vk_icd.h>
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>

#include "tu_entrypoints.h"

#define MAX_VBS 32
#define MAX_VERTEX_ATTRIBS 32
#define MAX_RTS 8
#define MAX_VSC_PIPES 32
#define MAX_VIEWPORTS 1
#define MAX_SCISSORS 16
#define MAX_DISCARD_RECTANGLES 4
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_PUSH_DESCRIPTORS 32
#define MAX_DYNAMIC_UNIFORM_BUFFERS 16
#define MAX_DYNAMIC_STORAGE_BUFFERS 8
#define MAX_DYNAMIC_BUFFERS                                                  \
   (MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
#define MAX_SAMPLES_LOG2 4
#define NUM_META_FS_KEYS 13
#define TU_MAX_DRM_DEVICES 8
#define MAX_VIEWS 8

#define NUM_DEPTH_CLEAR_PIPELINES 3

/*
 * This is the point we switch from using CP to compute shader
 * for certain buffer operations.
 */
#define TU_BUFFER_OPS_CS_THRESHOLD 4096

enum tu_mem_heap
{
   TU_MEM_HEAP_VRAM,
   TU_MEM_HEAP_VRAM_CPU_ACCESS,
   TU_MEM_HEAP_GTT,
   TU_MEM_HEAP_COUNT
};

enum tu_mem_type
{
   TU_MEM_TYPE_VRAM,
   TU_MEM_TYPE_GTT_WRITE_COMBINE,
   TU_MEM_TYPE_VRAM_CPU_ACCESS,
   TU_MEM_TYPE_GTT_CACHED,
   TU_MEM_TYPE_COUNT
};

#define tu_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint32_t
align_u32_npot(uint32_t v, uint32_t a)
{
   return (v + a - 1) / a * a;
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}
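
/* Usage sketch (illustrative): align_u32/align_u64/align_i32 require a
 * power-of-two alignment and round up with a mask, while align_u32_npot
 * works for any alignment:
 *
 *    align_u32(13, 8);       -> 16
 *    align_u32_npot(13, 6);  -> 18
 */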

/** Alignment must be a power of 2. */
static inline bool
tu_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}

static inline uint32_t
round_up_u32(uint32_t v, uint32_t a)
{
   return (v + a - 1) / a;
}

static inline uint64_t
round_up_u64(uint64_t v, uint64_t a)
{
   return (v + a - 1) / a;
}

static inline uint32_t
tu_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}

static inline float
tu_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f > max)
      return max;
   else if (f < min)
      return min;
   else
      return f;
}

static inline bool
tu_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}

#define for_each_bit(b, dword)                                               \
   for (uint32_t __dword = (dword);                                          \
        (b) = __builtin_ffs(__dword) - 1, __dword; __dword &= ~(1 << (b)))

#define typed_memcpy(dest, src, count)                                       \
   ({                                                                        \
      STATIC_ASSERT(sizeof(*src) == sizeof(*dest));                          \
      memcpy((dest), (src), (count) * sizeof(*(src)));                       \
   })

/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

struct tu_instance;

VkResult
__vk_errorf(struct tu_instance *instance,
            VkResult error,
            const char *file,
            int line,
            const char *format,
            ...);

#define vk_error(instance, error)                                            \
   __vk_errorf(instance, error, __FILE__, __LINE__, NULL);
#define vk_errorf(instance, error, format, ...)                              \
   __vk_errorf(instance, error, __FILE__, __LINE__, format, ##__VA_ARGS__);

void
__tu_finishme(const char *file, int line, const char *format, ...)
   tu_printflike(3, 4);
void
tu_loge(const char *format, ...) tu_printflike(1, 2);
void
tu_loge_v(const char *format, va_list va);
void
tu_logi(const char *format, ...) tu_printflike(1, 2);
void
tu_logi_v(const char *format, va_list va);
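
/* Usage sketch (illustrative): error returns go through the vk_error
 * wrappers above so the failure site is recorded, e.g.
 *
 *    return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 *
 * or, with a message,
 *
 *    return vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
 *                     "failed to open device %s", path);
 */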

/**
 * Print a FINISHME message, including its source location.
 */
#define tu_finishme(format, ...)                                             \
   do {                                                                      \
      static bool reported = false;                                          \
      if (!reported) {                                                       \
         __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);           \
         reported = true;                                                    \
      }                                                                      \
   } while (0)

/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define tu_assert(x)                                                         \
   ({                                                                        \
      if (unlikely(!(x)))                                                    \
         fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x);      \
   })
#else
#define tu_assert(x)
#endif

/* Suppress -Wunused in stub functions */
#define tu_use_args(...) __tu_use_args(0, ##__VA_ARGS__)
static inline void
__tu_use_args(int ignore, ...)
{
}

#define tu_stub()                                                            \
   do {                                                                      \
      tu_finishme("stub %s", __func__);                                      \
   } while (0)

void *
tu_lookup_entrypoint_unchecked(const char *name);
void *
tu_lookup_entrypoint_checked(
   const char *name,
   uint32_t core_version,
   const struct tu_instance_extension_table *instance,
   const struct tu_device_extension_table *device);

struct tu_physical_device
{
   VK_LOADER_DATA _loader_data;

   struct tu_instance *instance;

   char path[20];
   char name[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
   uint8_t driver_uuid[VK_UUID_SIZE];
   uint8_t device_uuid[VK_UUID_SIZE];
   uint8_t cache_uuid[VK_UUID_SIZE];

   struct wsi_device wsi_device;

   int local_fd;
   int master_fd;

   unsigned gpu_id;
   uint32_t gmem_size;
   uint32_t tile_align_w;
   uint32_t tile_align_h;

   /* This is the driver's on-disk cache, used as a fallback as opposed to
    * the pipeline cache defined by apps.
    */
   struct disk_cache *disk_cache;

   struct tu_device_extension_table supported_extensions;
};

enum tu_debug_flags
{
   TU_DEBUG_STARTUP = 1 << 0,
   TU_DEBUG_NIR = 1 << 1,
   TU_DEBUG_IR3 = 1 << 2,
};

struct tu_instance
{
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   uint32_t api_version;
   int physical_device_count;
   struct tu_physical_device physical_devices[TU_MAX_DRM_DEVICES];

   enum tu_debug_flags debug_flags;

   struct vk_debug_report_instance debug_report_callbacks;

   struct tu_instance_extension_table enabled_extensions;
};

VkResult
tu_wsi_init(struct tu_physical_device *physical_device);
void
tu_wsi_finish(struct tu_physical_device *physical_device);

bool
tu_instance_extension_supported(const char *name);
uint32_t
tu_physical_device_api_version(struct tu_physical_device *dev);
bool
tu_physical_device_extension_supported(struct tu_physical_device *dev,
                                       const char *name);

struct cache_entry;

struct tu_pipeline_cache
{
   struct tu_device *device;
   pthread_mutex_t mutex;

   uint32_t total_size;
   uint32_t table_size;
   uint32_t kernel_count;
   struct cache_entry **hash_table;
   bool modified;

   VkAllocationCallbacks alloc;
};

struct tu_pipeline_key
{
};

void
tu_pipeline_cache_init(struct tu_pipeline_cache *cache,
                       struct tu_device *device);
void
tu_pipeline_cache_finish(struct tu_pipeline_cache *cache);
void
tu_pipeline_cache_load(struct tu_pipeline_cache *cache,
                       const void *data,
                       size_t size);

struct tu_shader_variant;

bool
tu_create_shader_variants_from_pipeline_cache(
   struct tu_device *device,
   struct tu_pipeline_cache *cache,
   const unsigned char *sha1,
   struct tu_shader_variant **variants);

void
tu_pipeline_cache_insert_shaders(struct tu_device *device,
                                 struct tu_pipeline_cache *cache,
                                 const unsigned char *sha1,
                                 struct tu_shader_variant **variants,
                                 const void *const *codes,
                                 const unsigned *code_sizes);

struct tu_meta_state
{
   VkAllocationCallbacks alloc;

   struct tu_pipeline_cache cache;
};

/* queue types */
#define TU_QUEUE_GENERAL 0

#define TU_MAX_QUEUE_FAMILIES 1

struct tu_fence
{
   bool signaled;
   int fd;
};

void
tu_fence_init(struct tu_fence *fence, bool signaled);
void
tu_fence_finish(struct tu_fence *fence);
void
tu_fence_update_fd(struct tu_fence *fence, int fd);
void
tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src);
void
tu_fence_signal(struct tu_fence *fence);
void
tu_fence_wait_idle(struct tu_fence *fence);

struct tu_queue
{
   VK_LOADER_DATA _loader_data;
   struct tu_device *device;
   uint32_t queue_family_index;
   int queue_idx;
   VkDeviceQueueCreateFlags flags;

   uint32_t msm_queue_id;
   struct tu_fence submit_fence;
};

struct tu_device
{
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   struct tu_instance *instance;

   struct tu_meta_state meta_state;

   struct tu_queue *queues[TU_MAX_QUEUE_FAMILIES];
   int queue_count[TU_MAX_QUEUE_FAMILIES];

   struct tu_physical_device *physical_device;

   struct ir3_compiler *compiler;

   /* Backup in-memory cache to be used if the app doesn't provide one */
   struct tu_pipeline_cache *mem_cache;

   struct list_head shader_slabs;
   mtx_t shader_slab_mutex;

   struct tu_device_extension_table enabled_extensions;
};

struct tu_bo
{
   uint32_t gem_handle;
   uint64_t size;
   uint64_t iova;
   void *map;
};

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size);
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd);
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
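
/* Usage sketch for the BO helpers above (illustrative):
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 4096) != VK_SUCCESS)
 *       return vk_error(instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
 *    if (tu_bo_map(dev, &bo) == VK_SUCCESS)
 *       memcpy(bo.map, data, 4096);   // bo.iova holds the GPU address
 *    tu_bo_finish(dev, &bo);
 */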

struct tu_cs_entry
{
   /* No ownership */
   const struct tu_bo *bo;

   uint32_t size;
   uint32_t offset;
};

enum tu_cs_mode
{
   /*
    * A command stream in TU_CS_MODE_GROW mode grows automatically whenever
    * it is full. tu_cs_begin must be called before command packet emission
    * and tu_cs_end must be called after.
    *
    * This mode may create multiple entries internally. The entries must be
    * submitted together.
    */
   TU_CS_MODE_GROW,

   /*
    * A command stream in TU_CS_MODE_EXTERNAL mode wraps an external,
    * fixed-size buffer. tu_cs_begin and tu_cs_end are optional and have no
    * effect on it.
    *
    * This mode does not create any entry or any BO.
    */
   TU_CS_MODE_EXTERNAL,

   /*
    * A command stream in TU_CS_MODE_SUB_STREAM mode does not support direct
    * command packet emission. tu_cs_begin_sub_stream must be called to get
    * a sub-stream to emit command packets to. When done with the
    * sub-stream, tu_cs_end_sub_stream must be called.
    *
    * This mode does not create any entry internally.
    */
   TU_CS_MODE_SUB_STREAM,
};
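
/* Usage sketch for TU_CS_MODE_GROW (illustrative; tu_cs_begin/tu_cs_end are
 * named by the comments above, while the packet-emission helpers are assumed
 * to live in the command-stream code rather than this header):
 *
 *    tu_cs_begin(&cmd->cs);
 *    ... emit packets; the CS grows and may add internal entries ...
 *    tu_cs_end(&cmd->cs);
 *
 * For TU_CS_MODE_SUB_STREAM, tu_cs_begin_sub_stream/tu_cs_end_sub_stream
 * bracket emission into a sub-stream instead.
 */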

struct tu_cs
{
   uint32_t *start;
   uint32_t *cur;
   uint32_t *reserved_end;
   uint32_t *end;

   enum tu_cs_mode mode;
   uint32_t next_bo_size;

   struct tu_cs_entry *entries;
   uint32_t entry_count;
   uint32_t entry_capacity;

   struct tu_bo **bos;
   uint32_t bo_count;
   uint32_t bo_capacity;
};

struct tu_device_memory
{
   struct tu_bo bo;
   VkDeviceSize size;

   /* for dedicated allocations */
   struct tu_image *image;
   struct tu_buffer *buffer;

   uint32_t type_index;
   void *map;
   void *user_ptr;
};

struct tu_descriptor_range
{
   uint64_t va;
   uint32_t size;
};

struct tu_descriptor_set
{
   const struct tu_descriptor_set_layout *layout;
   uint32_t size;

   uint64_t va;
   uint32_t *mapped_ptr;
   struct tu_descriptor_range *dynamic_descriptors;
};

struct tu_push_descriptor_set
{
   struct tu_descriptor_set set;
   uint32_t capacity;
};

struct tu_descriptor_pool_entry
{
   uint32_t offset;
   uint32_t size;
   struct tu_descriptor_set *set;
};

struct tu_descriptor_pool
{
   uint8_t *mapped_ptr;
   uint64_t current_offset;
   uint64_t size;

   uint8_t *host_memory_base;
   uint8_t *host_memory_ptr;
   uint8_t *host_memory_end;

   uint32_t entry_count;
   uint32_t max_entry_count;
   struct tu_descriptor_pool_entry entries[0];
};

struct tu_descriptor_update_template_entry
{
   VkDescriptorType descriptor_type;

   /* The number of descriptors to update */
   uint32_t descriptor_count;

   /* Into mapped_ptr or dynamic_descriptors, in units of the respective
    * array */
   uint32_t dst_offset;

   /* In dwords. Not valid/used for dynamic descriptors */
   uint32_t dst_stride;

   uint32_t buffer_offset;

   /* Only valid for combined image samplers and samplers */
   uint16_t has_sampler;

   /* In bytes */
   size_t src_offset;
   size_t src_stride;

   /* For push descriptors */
   const uint32_t *immutable_samplers;
};

struct tu_descriptor_update_template
{
   uint32_t entry_count;
   VkPipelineBindPoint bind_point;
   struct tu_descriptor_update_template_entry entry[0];
};

struct tu_buffer
{
   VkDeviceSize size;

   VkBufferUsageFlags usage;
   VkBufferCreateFlags flags;

   struct tu_bo *bo;
   VkDeviceSize bo_offset;
};

enum tu_dynamic_state_bits
{
   TU_DYNAMIC_VIEWPORT = 1 << 0,
   TU_DYNAMIC_SCISSOR = 1 << 1,
   TU_DYNAMIC_LINE_WIDTH = 1 << 2,
   TU_DYNAMIC_DEPTH_BIAS = 1 << 3,
   TU_DYNAMIC_BLEND_CONSTANTS = 1 << 4,
   TU_DYNAMIC_DEPTH_BOUNDS = 1 << 5,
   TU_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6,
   TU_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7,
   TU_DYNAMIC_STENCIL_REFERENCE = 1 << 8,
   TU_DYNAMIC_DISCARD_RECTANGLE = 1 << 9,
   TU_DYNAMIC_ALL = (1 << 10) - 1,
};

struct tu_vertex_binding
{
   struct tu_buffer *buffer;
   VkDeviceSize offset;
};

struct tu_viewport_state
{
   uint32_t count;
   VkViewport viewports[MAX_VIEWPORTS];
};

struct tu_scissor_state
{
   uint32_t count;
   VkRect2D scissors[MAX_SCISSORS];
};

struct tu_discard_rectangle_state
{
   uint32_t count;
   VkRect2D rectangles[MAX_DISCARD_RECTANGLES];
};

struct tu_dynamic_state
{
   /**
    * Bitmask of (1 << VK_DYNAMIC_STATE_*).
    * Defines the set of saved dynamic state.
    */
   uint32_t mask;

   struct tu_viewport_state viewport;

   struct tu_scissor_state scissor;

   float line_width;

   struct
   {
      float bias;
      float clamp;
      float slope;
   } depth_bias;

   float blend_constants[4];

   struct
   {
      float min;
      float max;
   } depth_bounds;

   struct
   {
      uint32_t front;
      uint32_t back;
   } stencil_compare_mask;

   struct
   {
      uint32_t front;
      uint32_t back;
   } stencil_write_mask;

   struct
   {
      uint32_t front;
      uint32_t back;
   } stencil_reference;

   struct tu_discard_rectangle_state discard_rectangle;
};

extern const struct tu_dynamic_state default_dynamic_state;

const char *
tu_get_debug_option_name(int id);

const char *
tu_get_perftest_option_name(int id);

/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct tu_attachment_state
{
   VkImageAspectFlags pending_clear_aspects;
   uint32_t cleared_views;
   VkClearValue clear_value;
   VkImageLayout current_layout;
};

struct tu_descriptor_state
{
   struct tu_descriptor_set *sets[MAX_SETS];
   uint32_t dirty;
   uint32_t valid;
   struct tu_push_descriptor_set push_set;
   bool push_dirty;
   uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
};

struct tu_tile
{
   uint8_t pipe;
   uint8_t slot;
   VkOffset2D begin;
   VkOffset2D end;
};

struct tu_tiling_config
{
   VkRect2D render_area;
   uint32_t buffer_cpp[MAX_RTS + 2];
   uint32_t buffer_count;

   /* position and size of the first tile */
   VkRect2D tile0;
   /* number of tiles */
   VkExtent2D tile_count;

   uint32_t gmem_offsets[MAX_RTS + 2];

   /* size of the first VSC pipe */
   VkExtent2D pipe0;
   /* number of VSC pipes */
   VkExtent2D pipe_count;

   /* pipe register values */
   uint32_t pipe_config[MAX_VSC_PIPES];
   uint32_t pipe_sizes[MAX_VSC_PIPES];
};
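
/* Illustrative arithmetic (assuming tiles simply cover the render area): a
 * 1920x1088 render area with 256x256 tiles would give
 *
 *    tile_count = { DIV_ROUND_UP(1920, 256), DIV_ROUND_UP(1088, 256) }
 *               = { 8, 5 }.
 */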

enum tu_cmd_dirty_bits
{
   TU_CMD_DIRTY_PIPELINE = 1 << 0,
   TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 1,

   TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 16,
   TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 17,
   TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 18,
   TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 19,
};

struct tu_cmd_state
{
   uint32_t dirty;

   struct tu_pipeline *pipeline;

   /* Vertex buffers */
   struct
   {
      struct tu_buffer *buffers[MAX_VBS];
      VkDeviceSize offsets[MAX_VBS];
   } vb;

   struct tu_dynamic_state dynamic;

   /* Index buffer */
   struct tu_buffer *index_buffer;
   uint64_t index_offset;
   uint32_t index_type;
   uint32_t max_index_count;
   uint64_t index_va;

   const struct tu_render_pass *pass;
   const struct tu_subpass *subpass;
   const struct tu_framebuffer *framebuffer;
   struct tu_attachment_state *attachments;

   struct tu_tiling_config tiling_config;

   struct tu_cs_entry tile_load_ib;
   struct tu_cs_entry tile_store_ib;
};

struct tu_cmd_pool
{
   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;
   struct list_head free_cmd_buffers;
   uint32_t queue_family_index;
};

struct tu_cmd_buffer_upload
{
   uint8_t *map;
   unsigned offset;
   uint64_t size;
   struct list_head list;
};

enum tu_cmd_buffer_status
{
   TU_CMD_BUFFER_STATUS_INVALID,
   TU_CMD_BUFFER_STATUS_INITIAL,
   TU_CMD_BUFFER_STATUS_RECORDING,
   TU_CMD_BUFFER_STATUS_EXECUTABLE,
   TU_CMD_BUFFER_STATUS_PENDING,
};

struct tu_bo_list
{
   uint32_t count;
   uint32_t capacity;
   struct drm_msm_gem_submit_bo *bo_infos;
};

#define TU_BO_LIST_FAILED (~0)

void
tu_bo_list_init(struct tu_bo_list *list);
void
tu_bo_list_destroy(struct tu_bo_list *list);
void
tu_bo_list_reset(struct tu_bo_list *list);
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags);
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);

struct tu_cmd_buffer
{
   VK_LOADER_DATA _loader_data;

   struct tu_device *device;

   struct tu_cmd_pool *pool;
   struct list_head pool_link;

   VkCommandBufferUsageFlags usage_flags;
   VkCommandBufferLevel level;
   enum tu_cmd_buffer_status status;

   struct tu_cmd_state state;
   struct tu_vertex_binding vertex_bindings[MAX_VBS];
   uint32_t queue_family_index;

   uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
   VkShaderStageFlags push_constant_stages;
   struct tu_descriptor_set meta_push_descriptors;

   struct tu_descriptor_state descriptors[VK_PIPELINE_BIND_POINT_RANGE_SIZE];

   struct tu_cmd_buffer_upload upload;

   VkResult record_result;

   struct tu_bo_list bo_list;
   struct tu_cs cs;
   struct tu_cs draw_cs;
   struct tu_cs tile_cs;

   uint16_t marker_reg;
   uint32_t marker_seqno;

   struct tu_bo scratch_bo;
   uint32_t scratch_seqno;

   bool wait_for_idle;
};

void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event,
                     bool need_seqno);

bool
tu_get_memory_fd(struct tu_device *device,
                 struct tu_device_memory *memory,
                 int *pFD);

/*
 * Takes x,y,z as exact numbers of invocations, instead of blocks.
 *
 * Limitations: Can't call normal dispatch functions without binding or
 * rebinding the compute pipeline.
 */
void
tu_unaligned_dispatch(struct tu_cmd_buffer *cmd_buffer,
                      uint32_t x,
                      uint32_t y,
                      uint32_t z);

struct tu_event
{
   uint64_t *map;
};

struct tu_shader_module;

#define TU_HASH_SHADER_IS_GEOM_COPY_SHADER (1 << 0)
#define TU_HASH_SHADER_SISCHED (1 << 1)
#define TU_HASH_SHADER_UNSAFE_MATH (1 << 2)
void
tu_hash_shaders(unsigned char *hash,
                const VkPipelineShaderStageCreateInfo **stages,
                const struct tu_pipeline_layout *layout,
                const struct tu_pipeline_key *key,
                uint32_t flags);

static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define TU_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define tu_foreach_stage(stage, stage_bits)                                  \
   for (gl_shader_stage stage,                                               \
        __tmp = (gl_shader_stage)((stage_bits) & TU_STAGE_MASK);             \
        stage = __builtin_ffs(__tmp) - 1, __tmp; __tmp &= ~(1 << (stage)))

struct tu_shader_module
{
   unsigned char sha1[20];

   uint32_t code_size;
   const uint32_t *code[0];
};
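
/* Example (illustrative): tu_foreach_stage visits the set stages from
 * lowest to highest, so
 *
 *    tu_foreach_stage(stage, VK_SHADER_STAGE_VERTEX_BIT |
 *                               VK_SHADER_STAGE_FRAGMENT_BIT)
 *    {
 *       // visits MESA_SHADER_VERTEX, then MESA_SHADER_FRAGMENT
 *    }
 */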

struct tu_shader_compile_options
{
   struct ir3_shader_key key;

   bool optimize;
   bool include_binning_pass;
};

struct tu_shader
{
   struct ir3_shader ir3_shader;

   /* This may be true for vertex shaders. When true, variants[1] is the
    * binning variant and binning_binary is non-NULL.
    */
   bool has_binning_pass;

   void *binary;
   void *binning_binary;

   struct ir3_shader_variant variants[0];
};

struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 const VkAllocationCallbacks *alloc);

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc);

void
tu_shader_compile_options_init(
   struct tu_shader_compile_options *options,
   const VkGraphicsPipelineCreateInfo *pipeline_info);

VkResult
tu_shader_compile(struct tu_device *dev,
                  struct tu_shader *shader,
                  const struct tu_shader *next_stage,
                  const struct tu_shader_compile_options *options,
                  const VkAllocationCallbacks *alloc);

struct tu_pipeline
{
   struct tu_cs cs;

   struct tu_dynamic_state dynamic_state;

   struct tu_pipeline_layout *layout;

   bool need_indirect_descriptor_sets;
   VkShaderStageFlags active_stages;

   struct
   {
      struct tu_bo binary_bo;
      struct tu_cs_entry state_ib;
      struct tu_cs_entry binning_state_ib;
   } program;

   struct
   {
      uint8_t bindings[MAX_VERTEX_ATTRIBS];
      uint16_t strides[MAX_VERTEX_ATTRIBS];
      uint16_t offsets[MAX_VERTEX_ATTRIBS];
      uint32_t count;

      uint8_t binning_bindings[MAX_VERTEX_ATTRIBS];
      uint16_t binning_strides[MAX_VERTEX_ATTRIBS];
      uint16_t binning_offsets[MAX_VERTEX_ATTRIBS];
      uint32_t binning_count;

      struct tu_cs_entry state_ib;
      struct tu_cs_entry binning_state_ib;
   } vi;

   struct
   {
      enum pc_di_primtype primtype;
      bool primitive_restart;
   } ia;

   struct
   {
      struct tu_cs_entry state_ib;
   } vp;

   struct
   {
      uint32_t gras_su_cntl;
      struct tu_cs_entry state_ib;
   } rast;

   struct
   {
      struct tu_cs_entry state_ib;
   } ds;

   struct
   {
      struct tu_cs_entry state_ib;
   } blend;
};

void
tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport);

void
tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor);

void
tu6_emit_gras_su_cntl(struct tu_cs *cs,
                      uint32_t gras_su_cntl,
                      float line_width);

void
tu6_emit_depth_bias(struct tu_cs *cs,
                    float constant_factor,
                    float clamp,
                    float slope_factor);

void
tu6_emit_stencil_compare_mask(struct tu_cs *cs,
                              uint32_t front,
                              uint32_t back);

void
tu6_emit_stencil_write_mask(struct tu_cs *cs, uint32_t front, uint32_t back);

void
tu6_emit_stencil_reference(struct tu_cs *cs, uint32_t front, uint32_t back);

void
tu6_emit_blend_constants(struct tu_cs *cs, const float constants[4]);

struct tu_userdata_info *
tu_lookup_user_sgpr(struct tu_pipeline *pipeline,
                    gl_shader_stage stage,
                    int idx);

struct tu_shader_variant *
tu_get_shader(struct tu_pipeline *pipeline, gl_shader_stage stage);

struct tu_graphics_pipeline_create_info
{
   bool use_rectlist;
   bool db_depth_clear;
   bool db_stencil_clear;
   bool db_depth_disable_expclear;
   bool db_stencil_disable_expclear;
   bool db_flush_depth_inplace;
   bool db_flush_stencil_inplace;
   bool db_resummarize;
   uint32_t custom_blend_mode;
};

struct tu_native_format
{
   int vtx;      /* VFMTn_xxx or -1 */
   int tex;      /* TFMTn_xxx or -1 */
   int rb;       /* RBn_xxx or -1 */
   int swap;     /* enum a3xx_color_swap */
   bool present; /* internal only; always true to external users */
};

const struct tu_native_format *
tu6_get_native_format(VkFormat format);

int
tu_pack_clear_value(const VkClearValue *val,
                    VkFormat format,
                    uint32_t buf[4]);

enum a6xx_2d_ifmt tu6_rb_fmt_to_ifmt(enum a6xx_color_fmt fmt);

struct tu_image_level
{
   VkDeviceSize offset;
   VkDeviceSize size;
   uint32_t pitch;
};
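
/* Usage sketch (illustrative): mapping a Vulkan format to the hardware
 * format tables above; each field is -1 when the format is unusable in that
 * role:
 *
 *    const struct tu_native_format *fmt =
 *       tu6_get_native_format(VK_FORMAT_R8G8B8A8_UNORM);
 *    if (fmt && fmt->rb >= 0)
 *       ... fmt->rb is usable as the render-buffer (RBn_xxx) format ...
 */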

struct tu_image
{
   VkImageType type;
   /* The original VkFormat provided by the client. This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   VkImageAspectFlags aspects;
   VkImageUsageFlags usage;  /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling;     /**< VkImageCreateInfo::tiling */
   VkImageCreateFlags flags; /**< VkImageCreateInfo::flags */
   VkExtent3D extent;
   uint32_t level_count;
   uint32_t layer_count;

   VkDeviceSize size;
   uint32_t alignment;

   /* memory layout */
   VkDeviceSize layer_size;
   struct tu_image_level levels[15];
   unsigned tile_mode;

   unsigned queue_family_mask;
   bool exclusive;
   bool shareable;

   /* For VK_ANDROID_native_buffer, the WSI image owns the memory. */
   VkDeviceMemory owned_memory;

   /* Set when bound */
   const struct tu_bo *bo;
   VkDeviceSize bo_offset;
};

unsigned
tu_image_queue_family_mask(const struct tu_image *image,
                           uint32_t family,
                           uint32_t queue_family);

static inline uint32_t
tu_get_layerCount(const struct tu_image *image,
                  const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS
             ? image->layer_count - range->baseArrayLayer
             : range->layerCount;
}

static inline uint32_t
tu_get_levelCount(const struct tu_image *image,
                  const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS
             ? image->level_count - range->baseMipLevel
             : range->levelCount;
}

struct tu_image_view
{
   struct tu_image *image; /**< VkImageViewCreateInfo::image */

   VkImageViewType type;
   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   uint32_t base_layer;
   uint32_t layer_count;
   uint32_t base_mip;
   uint32_t level_count;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   uint32_t descriptor[16];

   /* Descriptor for use as a storage image as opposed to a sampled image.
    * This has a few differences for cube maps (e.g. type).
    */
   uint32_t storage_descriptor[16];
};

struct tu_sampler
{
};

struct tu_image_create_info
{
   const VkImageCreateInfo *vk_info;
   bool scanout;
   bool no_metadata_planes;
};

VkResult
tu_image_create(VkDevice _device,
                const struct tu_image_create_info *info,
                const VkAllocationCallbacks *alloc,
                VkImage *pImage);

VkResult
tu_image_from_gralloc(VkDevice device_h,
                      const VkImageCreateInfo *base_info,
                      const VkNativeBufferANDROID *gralloc_info,
                      const VkAllocationCallbacks *alloc,
                      VkImage *out_image_h);

void
tu_image_view_init(struct tu_image_view *view,
                   struct tu_device *device,
                   const VkImageViewCreateInfo *pCreateInfo);

struct tu_buffer_view
{
   VkFormat vk_format;
   uint64_t range; /**< VkBufferViewCreateInfo::range */
   uint32_t state[4];
};

void
tu_buffer_view_init(struct tu_buffer_view *view,
                    struct tu_device *device,
                    const VkBufferViewCreateInfo *pCreateInfo);

static inline struct VkExtent3D
tu_sanitize_image_extent(const VkImageType imageType,
                         const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline struct VkOffset3D
tu_sanitize_image_offset(const VkImageType imageType,
                         const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}

struct tu_attachment_info
{
   struct tu_image_view *attachment;
};

struct tu_framebuffer
{
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   uint32_t attachment_count;
   struct tu_attachment_info attachments[0];
};
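
/* Example (illustrative): tu_sanitize_image_extent clamps the dimensions
 * that the image type cannot use, so
 *
 *    tu_sanitize_image_extent(VK_IMAGE_TYPE_1D, (VkExtent3D) { 64, 3, 7 })
 *
 * returns { 64, 1, 1 }.
 */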

struct tu_subpass_barrier
{
   VkPipelineStageFlags src_stage_mask;
   VkAccessFlags src_access_mask;
   VkAccessFlags dst_access_mask;
};

void
tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
                   const struct tu_subpass_barrier *barrier);

struct tu_subpass_attachment
{
   uint32_t attachment;
   VkImageLayout layout;
};

struct tu_subpass
{
   uint32_t input_count;
   uint32_t color_count;
   struct tu_subpass_attachment *input_attachments;
   struct tu_subpass_attachment *color_attachments;
   struct tu_subpass_attachment *resolve_attachments;
   struct tu_subpass_attachment depth_stencil_attachment;

   /** Subpass has at least one resolve attachment */
   bool has_resolve;

   struct tu_subpass_barrier start_barrier;

   uint32_t view_mask;
   VkSampleCountFlagBits max_sample_count;
};

struct tu_render_pass_attachment
{
   VkFormat format;
   uint32_t samples;
   VkAttachmentLoadOp load_op;
   VkAttachmentLoadOp stencil_load_op;
   VkImageLayout initial_layout;
   VkImageLayout final_layout;
   uint32_t view_mask;
};

struct tu_render_pass
{
   uint32_t attachment_count;
   uint32_t subpass_count;
   struct tu_subpass_attachment *subpass_attachments;
   struct tu_render_pass_attachment *attachments;
   struct tu_subpass_barrier end_barrier;
   struct tu_subpass subpasses[0];
};

VkResult
tu_device_init_meta(struct tu_device *device);
void
tu_device_finish_meta(struct tu_device *device);

struct tu_query_pool
{
   uint32_t stride;
   uint32_t availability_offset;
   uint64_t size;
   char *ptr;
   VkQueryType type;
   uint32_t pipeline_stats_mask;
};

struct tu_semaphore
{
   uint32_t syncobj;
   uint32_t temp_syncobj;
};

void
tu_set_descriptor_set(struct tu_cmd_buffer *cmd_buffer,
                      VkPipelineBindPoint bind_point,
                      struct tu_descriptor_set *set,
                      unsigned idx);

void
tu_update_descriptor_sets(struct tu_device *device,
                          struct tu_cmd_buffer *cmd_buffer,
                          VkDescriptorSet overrideSet,
                          uint32_t descriptorWriteCount,
                          const VkWriteDescriptorSet *pDescriptorWrites,
                          uint32_t descriptorCopyCount,
                          const VkCopyDescriptorSet *pDescriptorCopies);

void
tu_update_descriptor_set_with_template(
   struct tu_device *device,
   struct tu_cmd_buffer *cmd_buffer,
   struct tu_descriptor_set *set,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   const void *pData);

void
tu_meta_push_descriptor_set(struct tu_cmd_buffer *cmd_buffer,
                            VkPipelineBindPoint pipelineBindPoint,
                            VkPipelineLayout _layout,
                            uint32_t set,
                            uint32_t descriptorWriteCount,
                            const VkWriteDescriptorSet *pDescriptorWrites);

int
tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id);

int
tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size);

int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id);

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);

uint32_t
tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags);
uint32_t
tu_gem_import_dmabuf(const struct tu_device *dev,
                     int prime_fd,
                     uint64_t size);
int
tu_gem_export_dmabuf(const struct tu_device *dev, uint32_t gem_handle);
void
tu_gem_close(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
tu_gem_info_offset(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
tu_gem_info_iova(const struct tu_device *dev, uint32_t gem_handle);

#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType)                          \
                                                                             \
   static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
   {                                                                         \
      return (struct __tu_type *) _handle;                                   \
   }                                                                         \
                                                                             \
   static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj)      \
   {                                                                         \
      return (__VkType) _obj;                                                \
   }

#define TU_DEFINE_NONDISP_HANDLE_CASTS(__tu_type, __VkType)                  \
                                                                             \
   static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
   {                                                                         \
      return (struct __tu_type *) (uintptr_t) _handle;                       \
   }                                                                         \
                                                                             \
   static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj)      \
   {                                                                         \
      return (__VkType)(uintptr_t) _obj;                                     \
   }

#define TU_FROM_HANDLE(__tu_type, __name, __handle)                          \
   struct __tu_type *__name = __tu_type##_from_handle(__handle)
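
/* Usage sketch (illustrative entrypoint name): unwrapping an API handle
 * into the driver struct at the top of an entrypoint:
 *
 *    VkResult
 *    tu_SomeEntrypoint(VkDevice _device, ...)
 *    {
 *       TU_FROM_HANDLE(tu_device, device, _device);
 *       ... use device->instance, device->physical_device, ...
 *    }
 */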

TU_DEFINE_HANDLE_CASTS(tu_cmd_buffer, VkCommandBuffer)
TU_DEFINE_HANDLE_CASTS(tu_device, VkDevice)
TU_DEFINE_HANDLE_CASTS(tu_instance, VkInstance)
TU_DEFINE_HANDLE_CASTS(tu_physical_device, VkPhysicalDevice)
TU_DEFINE_HANDLE_CASTS(tu_queue, VkQueue)

TU_DEFINE_NONDISP_HANDLE_CASTS(tu_cmd_pool, VkCommandPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer, VkBuffer)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer_view, VkBufferView)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_pool, VkDescriptorPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set, VkDescriptorSet)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set_layout,
                               VkDescriptorSetLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_update_template,
                               VkDescriptorUpdateTemplate)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_device_memory, VkDeviceMemory)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_fence, VkFence)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_event, VkEvent)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_framebuffer, VkFramebuffer)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_image, VkImage)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_image_view, VkImageView)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_cache, VkPipelineCache)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline, VkPipeline)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_layout, VkPipelineLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_query_pool, VkQueryPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_render_pass, VkRenderPass)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler, VkSampler)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_shader_module, VkShaderModule)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_semaphore, VkSemaphore)

#endif /* TU_PRIVATE_H */