/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "msm_kgsl.h"
#include "vk_util.h"

struct tu_syncobj {
   struct vk_object_base base;
   uint32_t timestamp;
   bool timestamp_valid;
};

static int
safe_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id)
{
   struct kgsl_drawctxt_create req = {
      .flags = KGSL_CONTEXT_SAVE_GMEM |
               KGSL_CONTEXT_NO_GMEM_ALLOC |
               KGSL_CONTEXT_PREAMBLE,
   };

   int ret = safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_DRAWCTXT_CREATE, &req);
   if (ret)
      return ret;

   *queue_id = req.drawctxt_id;

   return 0;
}

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
{
   struct kgsl_drawctxt_destroy req = {
      .drawctxt_id = queue_id,
   };

   safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_DRAWCTXT_DESTROY, &req);
}

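/* Buffer objects are allocated through kgsl's GPUMEM_ALLOC_ID ioctl: the
 * kernel picks the GPU virtual address (returned in gpuaddr) and hands back
 * a per-process allocation id, which this backend stores as the "gem handle".
 * Of the tu_bo_alloc_flags only GPU_READ_ONLY has a kgsl translation here;
 * the rest are ignored by this path.
 */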
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size,
               enum tu_bo_alloc_flags flags)
{
   struct kgsl_gpumem_alloc_id req = {
      .size = size,
   };

   if (flags & TU_BO_ALLOC_GPU_READ_ONLY)
      req.flags |= KGSL_MEMFLAGS_GPUREADONLY;

   int ret;

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUMEM_ALLOC_ID, &req);
   if (ret) {
      return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "GPUMEM_ALLOC_ID failed (%s)", strerror(errno));
   }

   *bo = (struct tu_bo) {
      .gem_handle = req.id,
      .size = req.mmapsize,
      .iova = req.gpuaddr,
   };

   return VK_SUCCESS;
}

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   struct kgsl_gpuobj_import_dma_buf import_dmabuf = {
      .fd = fd,
   };
   struct kgsl_gpuobj_import req = {
      .priv = (uintptr_t)&import_dmabuf,
      .priv_len = sizeof(import_dmabuf),
      .flags = 0,
      .type = KGSL_USER_MEM_TYPE_DMABUF,
   };
   int ret;

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUOBJ_IMPORT, &req);
   if (ret)
      return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to import dma-buf (%s)\n", strerror(errno));

   struct kgsl_gpuobj_info info_req = {
      .id = req.id,
   };

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUOBJ_INFO, &info_req);
   if (ret)
      return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to get dma-buf info (%s)\n", strerror(errno));

   *bo = (struct tu_bo) {
      .gem_handle = req.id,
      .size = info_req.size,
      .iova = info_req.gpuaddr,
   };

   return VK_SUCCESS;
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   tu_stub();

   return -1;
}

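/* kgsl has no separate "get mmap offset" ioctl; mapping appears to follow the
 * GPUMEM_ALLOC_ID convention where the allocation id, shifted up by the page
 * size, is used as the offset into the device fd (hence gem_handle << 12
 * below).
 */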
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = bo->gem_handle << 12;
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;

   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   struct kgsl_gpumem_free_id req = {
      .id = bo->gem_handle
   };

   safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_GPUMEM_FREE_ID, &req);
}

static int
get_kgsl_prop(int fd, unsigned int type, void *value, size_t size)
{
   struct kgsl_device_getproperty getprop = {
      .type = type,
      .value = value,
      .sizebytes = size,
   };

   return safe_ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &getprop);
}

VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   static const char path[] = "/dev/kgsl-3d0";
   int fd;

   struct tu_physical_device *device = &instance->physical_devices[0];

   if (instance->vk.enabled_extensions.KHR_display)
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "I can't KHR_display");

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      instance->physical_device_count = 0;
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   struct kgsl_devinfo info;
   if (get_kgsl_prop(fd, KGSL_PROP_DEVICE_INFO, &info, sizeof(info)))
      goto fail;

   uint64_t gmem_iova;
   if (get_kgsl_prop(fd, KGSL_PROP_UCHE_GMEM_VADDR, &gmem_iova, sizeof(gmem_iova)))
      goto fail;

   /* kgsl version check? */

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      mesa_logi("Found compatible device '%s'.", path);

   device->instance = instance;
   device->master_fd = -1;
   device->local_fd = fd;

   device->dev_id.gpu_id =
      ((info.chip_id >> 24) & 0xff) * 100 +
      ((info.chip_id >> 16) & 0xff) * 10 +
      ((info.chip_id >> 8) & 0xff);
   device->dev_id.chip_id = info.chip_id;
   device->gmem_size = info.gmem_sizebytes;
   device->gmem_base = gmem_iova;

   device->heap.size = tu_get_system_heap_size();
   device->heap.used = 0u;
   device->heap.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   if (tu_physical_device_init(device, instance) != VK_SUCCESS)
      goto fail;

   instance->physical_device_count = 1;

   return VK_SUCCESS;

fail:
   close(fd);
   return VK_ERROR_INITIALIZATION_FAILED;
}

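/* Ask kgsl to back a (context, timestamp) pair with a fence file descriptor.
 * The fd is written back through the priv pointer and, as far as this code
 * relies on, behaves like a sync fd: it is kept as queue->fence after a
 * submit and handed out as the Android native fence below. Returns -1 on
 * failure.
 */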
static int
timestamp_to_fd(struct tu_queue *queue, uint32_t timestamp)
{
   int fd;
   struct kgsl_timestamp_event event = {
      .type = KGSL_TIMESTAMP_EVENT_FENCE,
      .context_id = queue->msm_queue_id,
      .timestamp = timestamp,
      .priv = &fd,
      .len = sizeof(fd),
   };

   int ret = safe_ioctl(queue->device->fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
   if (ret)
      return -1;

   return fd;
}

/* return true if timestamp a is greater (more recent) than b
 * this relies on timestamps never having a difference > (1<<31)
 */
static inline bool
timestamp_cmp(uint32_t a, uint32_t b)
{
   return (int32_t) (a - b) >= 0;
}

static uint32_t
max_ts(uint32_t a, uint32_t b)
{
   return timestamp_cmp(a, b) ? a : b;
}

static uint32_t
min_ts(uint32_t a, uint32_t b)
{
   return timestamp_cmp(a, b) ? b : a;
}

static struct tu_syncobj
sync_merge(const VkSemaphore *syncobjs, uint32_t count, bool wait_all, bool reset)
{
   struct tu_syncobj ret;

   ret.timestamp_valid = false;

   for (uint32_t i = 0; i < count; ++i) {
      TU_FROM_HANDLE(tu_syncobj, sync, syncobjs[i]);

      /* TODO: this means the fence is unsignaled and will never become signaled */
      if (!sync->timestamp_valid)
         continue;

      if (!ret.timestamp_valid)
         ret.timestamp = sync->timestamp;
      else if (wait_all)
         ret.timestamp = max_ts(ret.timestamp, sync->timestamp);
      else
         ret.timestamp = min_ts(ret.timestamp, sync->timestamp);

      ret.timestamp_valid = true;
      if (reset)
         sync->timestamp_valid = false;
   }
   return ret;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   TU_FROM_HANDLE(tu_syncobj, fence, _fence);
   VkResult result = VK_SUCCESS;

   uint32_t max_entry_count = 0;
   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;

      const VkPerformanceQuerySubmitInfoKHR *perf_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
         if (perf_info)
            entry_count++;
      }

      max_entry_count = MAX2(max_entry_count, entry_count);
   }

   struct kgsl_command_object *cmds =
      vk_alloc(&queue->device->vk.alloc,
               sizeof(cmds[0]) * max_entry_count, 8,
               VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (cmds == NULL)
      return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);

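   /* One IOCTL_KGSL_GPU_COMMAND per VkSubmitInfo: flatten the command
    * buffers (plus an optional perf-counter pass IB) into an IB list, turn
    * the wait semaphores into a single timestamp syncpoint, and propagate
    * the timestamp returned by the kernel to the signal semaphores, the
    * fence and the per-queue sync fd.
    */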
   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      uint32_t entry_idx = 0;
      const VkPerformanceQuerySubmitInfoKHR *perf_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);

      for (uint32_t j = 0; j < submit->commandBufferCount; j++) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;

         if (perf_info) {
            struct tu_cs_entry *perf_cs_entry =
               &cmdbuf->device->perfcntrs_pass_cs_entries[perf_info->counterPassIndex];

            cmds[entry_idx++] = (struct kgsl_command_object) {
               .offset = perf_cs_entry->offset,
               .gpuaddr = perf_cs_entry->bo->iova,
               .size = perf_cs_entry->size,
               .flags = KGSL_CMDLIST_IB,
               .id = perf_cs_entry->bo->gem_handle,
            };
         }

         for (unsigned k = 0; k < cs->entry_count; k++) {
            cmds[entry_idx++] = (struct kgsl_command_object) {
               .offset = cs->entries[k].offset,
               .gpuaddr = cs->entries[k].bo->iova,
               .size = cs->entries[k].size,
               .flags = KGSL_CMDLIST_IB,
               .id = cs->entries[k].bo->gem_handle,
            };
         }
      }

      struct tu_syncobj s = sync_merge(submit->pWaitSemaphores,
                                       submit->waitSemaphoreCount,
                                       true, true);

      struct kgsl_cmd_syncpoint_timestamp ts = {
         .context_id = queue->msm_queue_id,
         .timestamp = s.timestamp,
      };
      struct kgsl_command_syncpoint sync = {
         .type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP,
         .size = sizeof(ts),
         .priv = (uintptr_t) &ts,
      };

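      /* The syncpoint above is only attached when at least one wait
       * semaphore carried a valid timestamp; otherwise numsyncs is 0 and
       * the batch runs without waiting.
       */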
      struct kgsl_gpu_command req = {
         .flags = KGSL_CMDBATCH_SUBMIT_IB_LIST,
         .context_id = queue->msm_queue_id,
         .cmdlist = (uint64_t) (uintptr_t) cmds,
         .numcmds = entry_idx,
         .cmdsize = sizeof(struct kgsl_command_object),
         .synclist = (uintptr_t) &sync,
         .syncsize = sizeof(struct kgsl_command_syncpoint),
         .numsyncs = s.timestamp_valid ? 1 : 0,
      };

      int ret = safe_ioctl(queue->device->physical_device->local_fd,
                           IOCTL_KGSL_GPU_COMMAND, &req);
      if (ret) {
         result = tu_device_set_lost(queue->device,
                                     "submit failed: %s\n", strerror(errno));
         goto fail;
      }

      for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
         TU_FROM_HANDLE(tu_syncobj, sem, submit->pSignalSemaphores[i]);
         sem->timestamp = req.timestamp;
         sem->timestamp_valid = true;
      }

      /* no need to merge fences as queue execution is serialized */
      if (i == submitCount - 1) {
         int fd = timestamp_to_fd(queue, req.timestamp);
         if (fd < 0) {
            result = tu_device_set_lost(queue->device,
                                        "Failed to create sync file for timestamp: %s\n",
                                        strerror(errno));
            goto fail;
         }

         if (queue->fence >= 0)
            close(queue->fence);
         queue->fence = fd;

         if (fence) {
            fence->timestamp = req.timestamp;
            fence->timestamp_valid = true;
         }
      }
   }
fail:
   vk_free(&queue->device->vk.alloc, cmds);

   return result;
}

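/* Fences and semaphores are both backed by the same tu_syncobj: it simply
 * records the last kgsl timestamp that will signal it (timestamp_valid is
 * false while the object has never been signaled or after a reset).
 */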
static VkResult
sync_create(VkDevice _device,
            bool signaled,
            bool fence,
            const VkAllocationCallbacks *pAllocator,
            void **p_sync)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_syncobj *sync =
      vk_object_alloc(&device->vk, pAllocator, sizeof(*sync),
                      fence ? VK_OBJECT_TYPE_FENCE : VK_OBJECT_TYPE_SEMAPHORE);
   if (!sync)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (signaled)
      tu_finishme("CREATE FENCE SIGNALED");

   sync->timestamp_valid = false;
   *p_sync = sync;

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ImportSemaphoreFdKHR(VkDevice _device,
                        const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   tu_finishme("ImportSemaphoreFdKHR");
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetSemaphoreFdKHR(VkDevice _device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   tu_finishme("GetSemaphoreFdKHR");
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateSemaphore(VkDevice device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   return sync_create(device, false, false, pAllocator, (void**) pSemaphore);
}

VKAPI_ATTR void VKAPI_CALL
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, semaphore);

   if (!sync)
      return;

   vk_object_free(&device->vk, pAllocator, sync);
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ImportFenceFdKHR(VkDevice _device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   tu_stub();

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetFenceFdKHR(VkDevice _device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   tu_stub();

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateFence(VkDevice device,
               const VkFenceCreateInfo *info,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   return sync_create(device, info->flags & VK_FENCE_CREATE_SIGNALED_BIT, true,
                      pAllocator, (void**) pFence);
}

VKAPI_ATTR void VKAPI_CALL
tu_DestroyFence(VkDevice _device, VkFence fence, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, fence);

   if (!sync)
      return;

   vk_object_free(&device->vk, pAllocator, sync);
}

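/* Waiting on fences reduces to a single WAITTIMESTAMP_CTXTID ioctl on the
 * merged timestamp. The Vulkan timeout is in nanoseconds while kgsl takes
 * milliseconds, hence the division; note that this assumes every fence was
 * signaled on the context of queue 0.
 */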
VKAPI_ATTR VkResult VKAPI_CALL
tu_WaitForFences(VkDevice _device,
                 uint32_t count,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_syncobj s = sync_merge((const VkSemaphore*) pFences, count, waitAll, false);

   if (!s.timestamp_valid)
      return VK_SUCCESS;

   int ret = ioctl(device->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
                   &(struct kgsl_device_waittimestamp_ctxtid) {
                      .context_id = device->queues[0]->msm_queue_id,
                      .timestamp = s.timestamp,
                      .timeout = timeout / 1000000,
                   });
   if (ret) {
      assert(errno == ETIME);
      return VK_TIMEOUT;
   }

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ResetFences(VkDevice _device, uint32_t count, const VkFence *pFences)
{
   for (uint32_t i = 0; i < count; i++) {
      TU_FROM_HANDLE(tu_syncobj, sync, pFences[i]);
      sync->timestamp_valid = false;
   }
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, _fence);

   if (!sync->timestamp_valid)
      return VK_NOT_READY;

   int ret = ioctl(device->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
                   &(struct kgsl_device_waittimestamp_ctxtid) {
                      .context_id = device->queues[0]->msm_queue_id,
                      .timestamp = sync->timestamp,
                      .timeout = 0,
                   });
   if (ret) {
      assert(errno == ETIME);
      return VK_NOT_READY;
   }

   return VK_SUCCESS;
}

int
tu_signal_fences(struct tu_device *device, struct tu_syncobj *fence1, struct tu_syncobj *fence2)
{
   tu_finishme("tu_signal_fences");
   return 0;
}

int
tu_syncobj_to_fd(struct tu_device *device, struct tu_syncobj *sync)
{
   tu_finishme("tu_syncobj_to_fd");
   return -1;
}

VkResult
tu_device_submit_deferred_locked(struct tu_device *dev)
{
   tu_finishme("tu_device_submit_deferred_locked");

   return VK_SUCCESS;
}

VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj)
{
   tu_finishme("tu_device_wait_u_trace");
   return VK_SUCCESS;
}

int
tu_drm_get_timestamp(struct tu_physical_device *device, uint64_t *ts)
{
   tu_finishme("tu_drm_get_timestamp");
   return 0;
}

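/* VK_ANDROID_native_buffer: the release fence handed back to the platform is
 * the sync fd for the merged wait-semaphore timestamp, or -1 when there is
 * nothing to wait for.
 */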
#ifdef ANDROID
VKAPI_ATTR VkResult VKAPI_CALL
tu_QueueSignalReleaseImageANDROID(VkQueue _queue,
                                  uint32_t waitSemaphoreCount,
                                  const VkSemaphore *pWaitSemaphores,
                                  VkImage image,
                                  int *pNativeFenceFd)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   if (!pNativeFenceFd)
      return VK_SUCCESS;

   struct tu_syncobj s = sync_merge(pWaitSemaphores, waitSemaphoreCount, true, true);

   if (!s.timestamp_valid) {
      *pNativeFenceFd = -1;
      return VK_SUCCESS;
   }

   *pNativeFenceFd = timestamp_to_fd(queue, s.timestamp);

   return VK_SUCCESS;
}
#endif