| /xsrc/external/mit/MesaLib/dist/src/gallium/drivers/llvmpipe/ |
| lp_scene_queue.c |
|    30 | * Scene queue. We'll use two queues. One contains "full" scenes which |
|    47 | * A queue of scenes |
|    57 | * to index the array, we use them modulo the queue size. This scheme |
|    58 | * works because the queue size is a power of two. |
|    66 | /** Allocate a new scene queue */ |
|    70 | /* Circular queue behavior depends on size being a power of two. */ |
|    74 | struct lp_scene_queue *queue = CALLOC_STRUCT(lp_scene_queue); local in function:lp_scene_queue_create |
|    76 | if (!queue) |
|    79 | (void) mtx_init(&queue->mutex, mtx_plain); |
|    80 | cnd_init(&queue… |
|    88 | lp_scene_queue_destroy(struct lp_scene_queue * queue) argument |
|    98 | lp_scene_dequeue(struct lp_scene_queue * queue, boolean wait) argument |
|   124 | lp_scene_enqueue(struct lp_scene_queue * queue, struct lp_scene * scene) argument |
|   [more matches in this file not shown] |
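The matched comments describe how this lp_scene_queue works: the enqueue and dequeue counters only ever increase, and the slot index is the counter taken modulo the queue size, which is cheap because the size is a power of two. A self-contained sketch of that indexing scheme follows; names and the size are illustrative, and the mutex/condvar the real queue pairs with it are omitted.

#include <stddef.h>

#define RING_SIZE 4                      /* must be a power of two */

struct ring {
   void *slots[RING_SIZE];
   unsigned head;                        /* total items ever enqueued */
   unsigned tail;                        /* total items ever dequeued */
};

static int
ring_enqueue(struct ring *r, void *item)
{
   if (r->head - r->tail == RING_SIZE)
      return 0;                          /* full */
   /* Counters grow without bound; the mask is the "modulo the queue size"
    * step, and unsigned wrap-around keeps head - tail correct. */
   r->slots[r->head++ & (RING_SIZE - 1)] = item;
   return 1;
}

static void *
ring_dequeue(struct ring *r)
{
   if (r->head == r->tail)
      return NULL;                       /* empty */
   return r->slots[r->tail++ & (RING_SIZE - 1)];
}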
| lp_scene_queue.h |
|    42 | lp_scene_queue_destroy(struct lp_scene_queue *queue); |
|    45 | lp_scene_dequeue(struct lp_scene_queue *queue, boolean wait); |
|    48 | lp_scene_enqueue(struct lp_scene_queue *queue, struct lp_scene *scene); |
| /xsrc/external/mit/MesaLib/dist/src/vulkan/util/ |
| vk_queue.c |
|    29 | vk_queue_init(struct vk_queue *queue, struct vk_device *device, argument |
|    33 | memset(queue, 0, sizeof(*queue)); |
|    34 | vk_object_base_init(device, &queue->base, VK_OBJECT_TYPE_QUEUE); |
|    36 | list_addtail(&queue->link, &device->queues); |
|    38 | queue->flags = pCreateInfo->flags; |
|    39 | queue->queue_family_index = pCreateInfo->queueFamilyIndex; |
|    42 | queue->index_in_family = index_in_family; |
|    44 | util_dynarray_init(&queue->labels, NULL); |
|    45 | queue… |
|    51 | vk_queue_finish(struct vk_queue * queue) argument |
|   [more matches in this file not shown] |
| vk_queue.h |
|    48 | /* Which queue this is within the queue family */ |
|    57 | * with a queue attached to it, all "active" labels will also be provided |
|    59 | * debug label to the queue: opening a label region and inserting a single |
|    95 | vk_queue_init(struct vk_queue *queue, struct vk_device *device, |
|   100 | vk_queue_finish(struct vk_queue *queue); |
|   102 | #define vk_foreach_queue(queue, device) \ |
|   103 | list_for_each_entry(struct vk_queue, queue, &(device)->queues, link) |
|   105 | #define vk_foreach_queue_safe(queue, device) \ |
|   106 | list_for_each_entry_safe(struct vk_queue, queue,… |
|   [more matches in this file not shown] |
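vk_foreach_queue above is just list_for_each_entry over device->queues, the list each queue is linked into by vk_queue_init in vk_queue.c. A small hedged sketch of iterating it; count_queues_in_family and the include path are illustrative, not Mesa code.

#include <stdint.h>
#include "vk_queue.h"   /* assumed in-tree include path */

static uint32_t
count_queues_in_family(struct vk_device *device, uint32_t family)
{
   uint32_t count = 0;

   /* Binds `queue` to each struct vk_queue on device->queues in turn,
    * walking the intrusive `link` member set up by vk_queue_init. */
   vk_foreach_queue(queue, device) {
      if (queue->queue_family_index == family)
         count++;
   }
   return count;
}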
| vk_debug_utils.c |
|   243 | VK_FROM_HANDLE(vk_queue, queue, _queue); |
|   248 | if (!queue->region_begin) |
|   249 | (void)util_dynarray_pop(&queue->labels, VkDebugUtilsLabelEXT); |
|   251 | util_dynarray_append(&queue->labels, VkDebugUtilsLabelEXT, *pLabelInfo); |
|   252 | queue->region_begin = true; |
|   258 | VK_FROM_HANDLE(vk_queue, queue, _queue); |
|   263 | if (!queue->region_begin) |
|   264 | (void)util_dynarray_pop(&queue->labels, VkDebugUtilsLabelEXT); |
|   266 | (void)util_dynarray_pop(&queue->labels, VkDebugUtilsLabelEXT); |
|   267 | queue… |
|   [more matches in this file not shown] |
| /xsrc/external/mit/MesaLib.old/dist/src/gallium/drivers/llvmpipe/ |
| lp_scene_queue.h |
|    42 | lp_scene_queue_destroy(struct lp_scene_queue *queue); |
|    45 | lp_scene_dequeue(struct lp_scene_queue *queue, boolean wait); |
|    48 | lp_scene_enqueue(struct lp_scene_queue *queue, struct lp_scene *scene); |
| lp_scene_queue.c |
|    30 | * Scene queue. We'll use two queues. One contains "full" scenes which |
|    49 | * A queue of scenes |
|    58 | /** Allocate a new scene queue */ |
|    62 | struct lp_scene_queue *queue = CALLOC_STRUCT(lp_scene_queue); local in function:lp_scene_queue_create |
|    63 | if (!queue) |
|    66 | queue->ring = util_ringbuffer_create( MAX_SCENE_QUEUE * |
|    68 | if (queue->ring == NULL) |
|    71 | return queue; |
|    74 | FREE(queue); |
|    79 | /** Delete a scene queue */ |
|    81 | lp_scene_queue_destroy(struct lp_scene_queue * queue) argument |
|    90 | lp_scene_dequeue(struct lp_scene_queue * queue, boolean wait) argument |
|   110 | lp_scene_enqueue(struct lp_scene_queue * queue, struct lp_scene * scene) argument |
|   [more matches in this file not shown] |
| /xsrc/external/mit/MesaLib/dist/src/util/ |
| u_queue.c |
|    47 | util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads, |
|    98 | add_to_atexit_list(struct util_queue *queue) argument |
|   103 | list_add(&queue->head, &queue_list); |
|   108 | remove_from_atexit_list(struct util_queue *queue) argument |
|   114 | if (iter == queue) { |
|   262 | struct util_queue *queue; member in struct:thread_input |
|   269 | struct util_queue *queue = ((struct thread_input*)input)->queue; local in function:util_queue_thread_func |
|   274 | if (queue->flags & UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY) { |
|   290 | if (queue… |
|   356 | util_queue_create_thread(struct util_queue * queue, unsigned index) argument |
|   388 | util_queue_adjust_num_threads(struct util_queue * queue, unsigned num_threads) argument |
|   421 | util_queue_init(struct util_queue * queue, const char * name, unsigned max_jobs, unsigned num_threads, unsigned flags, void * global_data) argument |
|   512 | util_queue_kill_threads(struct util_queue * queue, unsigned keep_num_threads, bool finish_locked) argument |
|   550 | util_queue_destroy(struct util_queue * queue) argument |
|   567 | util_queue_add_job(struct util_queue * queue, void * job, struct util_queue_fence * fence, util_queue_execute_func execute, util_queue_execute_func cleanup, const size_t job_size) argument |
|   660 | util_queue_drop_job(struct util_queue * queue, struct util_queue_fence * fence) argument |
|   692 | util_queue_finish(struct util_queue * queue) argument |
|   730 | util_queue_get_thread_time_nano(struct util_queue * queue, unsigned thread_index) argument |
|   [more matches in this file not shown] |
| u_vector.h |
|    25 | * u_vector is a vector based queue for storing arbitrary |
|    57 | int u_vector_init_pow2(struct u_vector *queue, |
|    61 | void *u_vector_add(struct u_vector *queue); |
|    62 | void *u_vector_remove(struct u_vector *queue); |
|    65 | u_vector_init(struct u_vector *queue, argument |
|    71 | return u_vector_init_pow2(queue, initial_element_count, element_size); |
|    75 | u_vector_length(struct u_vector *queue) argument |
|    77 | return (queue->head - queue->tail) / queue… |
|    96 | u_vector_finish(struct u_vector * queue) argument |
|   [more matches in this file not shown] |
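From the prototypes and inline helpers visible above, u_vector is a growable ring used as a queue: u_vector_add returns storage for a new element, u_vector_remove returns the oldest one, and u_vector_length is (head - tail) / element_size. A hedged usage sketch follows, assuming the in-tree include path "util/u_vector.h", the newer argument order (element count, then element size), and that u_vector_add/u_vector_remove return NULL on failure/empty.

#include <stdint.h>
#include "util/u_vector.h"   /* assumed in-tree include path */

static uint32_t
u_vector_demo(void)
{
   struct u_vector v;
   uint32_t *slot;
   uint32_t out = 0;

   /* Newer argument order shown above: element count, then element size
    * (the MesaLib.old header takes element size first). Both are kept as
    * powers of two here, as the u_vector_init_pow2 forwarding suggests. */
   if (!u_vector_init(&v, 16, sizeof(uint32_t)))
      return 0;

   slot = u_vector_add(&v);        /* space for one new element at the head */
   if (slot)
      *slot = 42;

   slot = u_vector_remove(&v);     /* oldest element, assumed NULL when empty */
   if (slot)
      out = *slot;

   u_vector_finish(&v);            /* release the backing storage */
   return out;
}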
| u_queue.h |
|    27 | /* Job queue with execution in a separate thread. |
|   219 | size_t total_jobs_size; /* memory use of all jobs in the queue */ |
|   227 | bool util_queue_init(struct util_queue *queue, |
|   233 | void util_queue_destroy(struct util_queue *queue); |
|   236 | void util_queue_add_job(struct util_queue *queue, |
|   242 | void util_queue_drop_job(struct util_queue *queue, |
|   245 | void util_queue_finish(struct util_queue *queue); |
|   248 | * greater than the initial number of threads at the creation of the queue, |
|   252 | util_queue_adjust_num_threads(struct util_queue *queue, unsigned num_threads); |
|   254 | int64_t util_queue_get_thread_time_nano(struct util_queue *queue, |
|   259 | util_queue_is_initialized(struct util_queue * queue) argument |
|   270 | struct util_queue *queue; member in struct:util_queue_monitoring |
|   [more matches in this file not shown] |
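Together, the u_queue.c and u_queue.h fragments above outline the API shape: util_queue_init spins up worker threads, util_queue_add_job hands them a job plus a fence, and util_queue_finish/util_queue_destroy drain and tear the queue down. A hedged usage sketch against the newer six-argument prototypes; the job struct, the exact execute-callback signature, and the util_queue_fence_init/util_queue_fence_wait helpers used here are assumptions not shown in the fragments.

#include "util/u_queue.h"   /* assumed in-tree include path */

struct double_job {
   int input;
   int result;
};

/* Runs on one of the queue's worker threads; the gdata/thread_index
 * parameters are assumed from the newer callback signature. */
static void
double_execute(void *job, void *global_data, int thread_index)
{
   struct double_job *j = job;
   (void)global_data;
   (void)thread_index;
   j->result = j->input * 2;
}

static void
run_one_job(void)
{
   struct util_queue queue;
   struct util_queue_fence fence;
   struct double_job job = { .input = 21 };

   /* name, max queued jobs, worker threads, flags, global_data */
   if (!util_queue_init(&queue, "demo", 8, 1, 0, NULL))
      return;

   util_queue_fence_init(&fence);
   util_queue_add_job(&queue, &job, &fence, double_execute, NULL, sizeof(job));
   util_queue_fence_wait(&fence);   /* block until double_execute has run */

   util_queue_finish(&queue);       /* drain anything still queued */
   util_queue_destroy(&queue);
}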
| /xsrc/external/mit/MesaLib.old/dist/src/util/ |
| u_queue.c |
|    37 | util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads, |
|    88 | add_to_atexit_list(struct util_queue *queue) argument |
|    93 | LIST_ADD(&queue->head, &queue_list); |
|    98 | remove_from_atexit_list(struct util_queue *queue) argument |
|   104 | if (iter == queue) { |
|   248 | struct util_queue *queue; member in struct:thread_input |
|   255 | struct util_queue *queue = ((struct thread_input*)input)->queue; local in function:util_queue_thread_func |
|   261 | if (queue->flags & UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY) { |
|   287 | if (strlen(queue… |
|   343 | util_queue_create_thread(struct util_queue * queue, unsigned index) argument |
|   374 | util_queue_adjust_num_threads(struct util_queue * queue, unsigned num_threads) argument |
|   407 | util_queue_init(struct util_queue * queue, const char * name, unsigned max_jobs, unsigned num_threads, unsigned flags) argument |
|   496 | util_queue_kill_threads(struct util_queue * queue, unsigned keep_num_threads, bool finish_locked) argument |
|   527 | util_queue_destroy(struct util_queue * queue) argument |
|   541 | util_queue_add_job(struct util_queue * queue, void * job, struct util_queue_fence * fence, util_queue_execute_func execute, util_queue_execute_func cleanup) argument |
|   620 | util_queue_drop_job(struct util_queue * queue, struct util_queue_fence * fence) argument |
|   659 | util_queue_finish(struct util_queue * queue) argument |
|   689 | util_queue_get_thread_time_nano(struct util_queue * queue, unsigned thread_index) argument |
|   [more matches in this file not shown] |
| u_vector.h |
|    25 | * u_vector is a vector based queue for storing arbitrary |
|    52 | int u_vector_init(struct u_vector *queue, uint32_t element_size, uint32_t size); |
|    53 | void *u_vector_add(struct u_vector *queue); |
|    54 | void *u_vector_remove(struct u_vector *queue); |
|    57 | u_vector_length(struct u_vector *queue) argument |
|    59 | return (queue->head - queue->tail) / queue->element_size; |
|    78 | u_vector_finish(struct u_vector *queue) argument |
|    80 | free(queue… |
|   [more matches in this file not shown] |
| u_queue.h |
|    27 | /* Job queue with execution in a separate thread. |
|   221 | bool util_queue_init(struct util_queue *queue, |
|   226 | void util_queue_destroy(struct util_queue *queue); |
|   229 | void util_queue_add_job(struct util_queue *queue, |
|   234 | void util_queue_drop_job(struct util_queue *queue, |
|   237 | void util_queue_finish(struct util_queue *queue); |
|   240 | * greater than the initial number of threads at the creation of the queue, |
|   244 | util_queue_adjust_num_threads(struct util_queue *queue, unsigned num_threads); |
|   246 | int64_t util_queue_get_thread_time_nano(struct util_queue *queue, |
|   251 | util_queue_is_initialized(struct util_queue *queue) argument |
|   262 | struct util_queue *queue; member in struct:util_queue_monitoring |
|   [more matches in this file not shown] |
| /xsrc/external/mit/MesaLib.old/dist/src/gallium/drivers/virgl/ |
| virgl_transfer_queue.h |
|    40 | void virgl_transfer_queue_init(struct virgl_transfer_queue *queue, |
|    44 | void virgl_transfer_queue_fini(struct virgl_transfer_queue *queue); |
|    46 | int virgl_transfer_queue_unmap(struct virgl_transfer_queue *queue, |
|    49 | int virgl_transfer_queue_clear(struct virgl_transfer_queue *queue, |
|    52 | bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue, |
|    56 | * Search the transfer queue for a transfer suitable for extension and |
|    60 | struct virgl_transfer_queue *queue, struct virgl_transfer *transfer); |
| virgl_transfer_queue.c |
|    42 | typedef void (*list_action_t)(struct virgl_transfer_queue *queue, |
|   102 | static void set_true(UNUSED struct virgl_transfer_queue *queue, argument |
|   109 | static void set_queued(UNUSED struct virgl_transfer_queue *queue, argument |
|   117 | static void remove_transfer(struct virgl_transfer_queue *queue, argument |
|   124 | virgl_resource_destroy_transfer(queue->pool, queued); |
|   127 | static void replace_unmapped_transfer(struct virgl_transfer_queue *queue, argument |
|   136 | remove_transfer(queue, args); |
|   137 | queue->num_dwords -= (VIRGL_TRANSFER3D_SIZE + 1); |
|   140 | static void transfer_put(struct virgl_transfer_queue *queue, argument |
|   146 | queue… |
|   153 | transfer_write(struct virgl_transfer_queue * queue, struct list_action_args * args) argument |
|   167 | compare_and_perform_action(struct virgl_transfer_queue * queue, struct list_iteration_args * iter) argument |
|   186 | intersect_and_set_queued_once(struct virgl_transfer_queue * queue, struct list_iteration_args * iter) argument |
|   206 | perform_action(struct virgl_transfer_queue * queue, struct list_iteration_args * iter) argument |
|   222 | add_internal(struct virgl_transfer_queue * queue, struct virgl_transfer * transfer) argument |
|   247 | virgl_transfer_queue_init(struct virgl_transfer_queue * queue, struct virgl_screen * vs, struct slab_child_pool * pool) argument |
|   265 | virgl_transfer_queue_fini(struct virgl_transfer_queue * queue) argument |
|   289 | virgl_transfer_queue_unmap(struct virgl_transfer_queue * queue, struct virgl_transfer * transfer) argument |
|   312 | virgl_transfer_queue_clear(struct virgl_transfer_queue * queue, struct virgl_cmd_buf * cbuf) argument |
|   342 | virgl_transfer_queue_is_queued(struct virgl_transfer_queue * queue, struct virgl_transfer * transfer) argument |
|   364 | virgl_transfer_queue_extend(struct virgl_transfer_queue * queue, struct virgl_transfer * transfer) argument |
|   [more matches in this file not shown] |
| /xsrc/external/mit/MesaLib/dist/src/gallium/drivers/virgl/ |
| virgl_transfer_queue.h |
|    43 | void virgl_transfer_queue_init(struct virgl_transfer_queue *queue, |
|    46 | void virgl_transfer_queue_fini(struct virgl_transfer_queue *queue); |
|    48 | int virgl_transfer_queue_unmap(struct virgl_transfer_queue *queue, |
|    51 | int virgl_transfer_queue_clear(struct virgl_transfer_queue *queue, |
|    54 | bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue, |
|    58 | * Search the transfer queue for a transfer suitable for extension and |
|    61 | bool virgl_transfer_queue_extend_buffer(struct virgl_transfer_queue *queue, |
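Both versions of this header carry the comment above about searching the queue for "a transfer suitable for extension": rather than queueing a new upload for every small buffer write, the driver tries to fold the new range into a transfer that is already queued (virgl_transfer_queue_extend / virgl_transfer_queue_extend_buffer). The sketch below illustrates only that range-merging idea with hypothetical names; it is not virgl's actual bookkeeping, which also matches the resource and copies the data into the staging buffer.

#include <stdbool.h>

struct queued_range {
   unsigned start, end;   /* [start, end) in bytes within one resource */
};

/* Returns true when an existing queued range absorbed [start, end), so the
 * caller does not need to queue a new transfer. */
static bool
try_extend_queued(struct queued_range *queued, unsigned count,
                  unsigned start, unsigned end)
{
   for (unsigned i = 0; i < count; i++) {
      /* Overlapping or directly adjacent: grow the existing entry. */
      if (start <= queued[i].end && end >= queued[i].start) {
         if (start < queued[i].start)
            queued[i].start = start;
         if (end > queued[i].end)
            queued[i].end = end;
         return true;
      }
   }
   return false;
}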
| virgl_transfer_queue.c |
|    44 | typedef void (*list_action_t)(struct virgl_transfer_queue *queue, |
|   140 | virgl_transfer_queue_find_overlap(const struct virgl_transfer_queue *queue, argument |
|   147 | LIST_FOR_EACH_ENTRY(xfer, &queue->transfer_list, queue_link) { |
|   162 | static void remove_transfer(struct virgl_transfer_queue *queue, argument |
|   166 | virgl_resource_destroy_transfer(queue->vctx, queued); |
|   169 | static void replace_unmapped_transfer(struct virgl_transfer_queue *queue, argument |
|   178 | remove_transfer(queue, queued); |
|   179 | queue->num_dwords -= (VIRGL_TRANSFER3D_SIZE + 1); |
|   182 | static void transfer_put(struct virgl_transfer_queue *queue, argument |
|   187 | queue… |
|   195 | transfer_write(struct virgl_transfer_queue * queue, struct list_action_args * args) argument |
|   208 | compare_and_perform_action(struct virgl_transfer_queue * queue, struct list_iteration_args * iter) argument |
|   226 | perform_action(struct virgl_transfer_queue * queue, struct list_iteration_args * iter) argument |
|   241 | add_internal(struct virgl_transfer_queue * queue, struct virgl_transfer * transfer) argument |
|   264 | virgl_transfer_queue_init(struct virgl_transfer_queue * queue, struct virgl_context * vctx) argument |
|   282 | virgl_transfer_queue_fini(struct virgl_transfer_queue * queue) argument |
|   301 | virgl_transfer_queue_unmap(struct virgl_transfer_queue * queue, struct virgl_transfer * transfer) argument |
|   322 | virgl_transfer_queue_clear(struct virgl_transfer_queue * queue, struct virgl_cmd_buf * cbuf) argument |
|   348 | virgl_transfer_queue_is_queued(struct virgl_transfer_queue * queue, struct virgl_transfer * transfer) argument |
|   359 | virgl_transfer_queue_extend_buffer(struct virgl_transfer_queue * queue, const struct virgl_hw_res * hw_res, unsigned offset, unsigned size, const void * data) argument |
|   [more matches in this file not shown] |
| /xsrc/external/mit/MesaLib.old/dist/src/vulkan/wsi/ |
| wsi_common_queue.h |
|    38 | wsi_queue_init(struct wsi_queue *queue, int length) argument |
|    46 | ret = u_vector_init(&queue->vector, sizeof(uint32_t), |
|    60 | ret = pthread_cond_init(&queue->cond, &condattr); |
|    64 | ret = pthread_mutex_init(&queue->mutex, NULL); |
|    72 | pthread_cond_destroy(&queue->cond); |
|    76 | u_vector_finish(&queue->vector); |
|    82 | wsi_queue_destroy(struct wsi_queue *queue) argument |
|    84 | u_vector_finish(&queue->vector); |
|    85 | pthread_mutex_destroy(&queue->mutex); |
|    86 | pthread_cond_destroy(&queue… |
|    90 | wsi_queue_push(struct wsi_queue * queue, uint32_t index) argument |
|   109 | wsi_queue_pull(struct wsi_queue * queue, uint32_t * index, uint64_t timeout) argument |
|   [more matches in this file not shown] |
| /xsrc/external/mit/MesaLib/dist/src/vulkan/wsi/ |
| wsi_common_queue.h |
|    38 | wsi_queue_init(struct wsi_queue *queue, int length) argument |
|    45 | ret = u_vector_init(&queue->vector, length, sizeof(uint32_t)); |
|    58 | ret = pthread_cond_init(&queue->cond, &condattr); |
|    62 | ret = pthread_mutex_init(&queue->mutex, NULL); |
|    70 | pthread_cond_destroy(&queue->cond); |
|    74 | u_vector_finish(&queue->vector); |
|    80 | wsi_queue_destroy(struct wsi_queue *queue) argument |
|    82 | u_vector_finish(&queue->vector); |
|    83 | pthread_mutex_destroy(&queue->mutex); |
|    84 | pthread_cond_destroy(&queue… |
|    88 | wsi_queue_push(struct wsi_queue * queue, uint32_t index) argument |
|   107 | wsi_queue_pull(struct wsi_queue * queue, uint32_t * index, uint64_t timeout) argument |
|   [more matches in this file not shown] |
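wsi_queue_init above pairs a u_vector of uint32_t image indices with a pthread mutex and a condition variable (created through a condattr so the pull timeout can use a monotonic clock), and wsi_queue_push/wsi_queue_pull are the usual signal/wait pair around it. A self-contained sketch of that producer/consumer pattern, using a fixed array instead of u_vector and omitting the timeout handling of the real pull; all names are illustrative.

#include <pthread.h>
#include <stdint.h>

#define TINY_QUEUE_SIZE 64       /* bounded here; the real code grows a u_vector */

struct tiny_queue {
   uint32_t slots[TINY_QUEUE_SIZE];
   unsigned head, tail;          /* monotonically increasing counters */
   pthread_mutex_t mutex;
   pthread_cond_t cond;
};

static void
tiny_queue_init(struct tiny_queue *q)
{
   q->head = q->tail = 0;
   pthread_mutex_init(&q->mutex, NULL);
   pthread_cond_init(&q->cond, NULL);   /* the real init also sets CLOCK_MONOTONIC */
}

static void
tiny_queue_push(struct tiny_queue *q, uint32_t index)
{
   pthread_mutex_lock(&q->mutex);
   /* capacity check omitted for brevity */
   q->slots[q->head++ % TINY_QUEUE_SIZE] = index;
   pthread_cond_signal(&q->cond);       /* wake one waiting consumer */
   pthread_mutex_unlock(&q->mutex);
}

static uint32_t
tiny_queue_pull(struct tiny_queue *q)
{
   uint32_t index;

   pthread_mutex_lock(&q->mutex);
   while (q->head == q->tail)           /* re-check after every wakeup */
      pthread_cond_wait(&q->cond, &q->mutex);
   index = q->slots[q->tail++ % TINY_QUEUE_SIZE];
   pthread_mutex_unlock(&q->mutex);
   return index;
}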
| /xsrc/external/mit/MesaLib.old/dist/src/gallium/drivers/svga/ |
| svga_state_tss.c |
|    82 | struct bind_queue *queue) |
|   130 | queue->bind[queue->bind_count].unit = unit; |
|   131 | queue->bind[queue->bind_count].view = view; |
|   132 | queue->bind_count++; |
|   150 | struct bind_queue queue; local in function:update_tss_binding |
|   155 | queue.bind_count = 0; |
|   163 | &queue); |
|   176 | &queue); |
|    76 | emit_tex_binding_unit(struct svga_context * svga, unsigned unit, const struct svga_sampler_state * s, const struct pipe_sampler_view * sv, struct svga_hw_view_state * view, boolean reemit, struct bind_queue * queue) argument |
|   241 | struct bind_queue queue; local in function:svga_reemit_tss_bindings |
|   356 | emit_tss_unit(struct svga_context * svga, unsigned unit, const struct svga_sampler_state * state, struct ts_queue * queue) argument |
|   387 | struct ts_queue queue; local in function:update_tss |
|   [more matches in this file not shown] |
| /xsrc/external/mit/MesaLib/dist/src/gallium/drivers/svga/ |
| svga_state_tss.c |
|    82 | struct bind_queue *queue) |
|   130 | queue->bind[queue->bind_count].unit = unit; |
|   131 | queue->bind[queue->bind_count].view = view; |
|   132 | queue->bind_count++; |
|   150 | struct bind_queue queue; local in function:update_tss_binding |
|   154 | queue.bind_count = 0; |
|   162 | &queue); |
|   176 | &queue); |
|    76 | emit_tex_binding_unit(struct svga_context * svga, unsigned unit, const struct svga_sampler_state * s, const struct pipe_sampler_view * sv, struct svga_hw_view_state * view, boolean reemit, struct bind_queue * queue) argument |
|   241 | struct bind_queue queue; local in function:svga_reemit_tss_bindings |
|   357 | emit_tss_unit(struct svga_context * svga, unsigned unit, const struct svga_sampler_state * state, struct ts_queue * queue) argument |
|   388 | struct ts_queue queue; local in function:update_tss |
|   [more matches in this file not shown] |
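The hits above show the pattern used by update_tss_binding: emit_tex_binding_unit does not issue commands one at a time, it appends (unit, view) pairs to a small on-stack queue, and the caller flushes the whole queue in a single command afterwards. A stripped-down sketch of that accumulate-then-flush pattern; all names are illustrative, not svga's actual types.

struct sampler_bind {
   unsigned unit;
   const void *view;     /* stands in for the hardware view pointer */
};

struct sampler_bind_queue {
   struct sampler_bind bind[16];   /* real code sizes this to the unit count */
   unsigned bind_count;
};

static void
queue_sampler_binding(struct sampler_bind_queue *q,
                      unsigned unit, const void *view)
{
   if (q->bind_count < 16) {
      q->bind[q->bind_count].unit = unit;
      q->bind[q->bind_count].view = view;
      q->bind_count++;
   }
}

The caller starts from bind_count = 0, visits every unit, and then emits one command covering bind_count entries (the SVGA3D command emission itself is omitted here).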
| /xsrc/external/mit/MesaLib/dist/src/freedreno/vulkan/ |
| tu_wsi.c |
|    90 | TU_FROM_HANDLE(tu_queue, queue, _queue); |
|    92 | u_trace_context_process(&queue->device->trace_context, true); |
|    95 | &queue->device->physical_device->wsi_device, |
|    96 | tu_device_to_handle(queue->device), _queue, queue->vk.queue_family_index, |
| /xsrc/external/mit/MesaLib/dist/src/intel/vulkan/ |
| anv_queue.c |
|   240 | static VkResult anv_queue_submit_add_fence_bo(struct anv_queue *queue, |
|   246 | anv_queue_submit_timeline_locked(struct anv_queue *queue, argument |
|   261 | result = anv_queue_submit_add_fence_bo(queue, submit, point->bo, false); |
|   272 | result = anv_timeline_add_point_locked(queue->device, timeline, |
|   277 | result = anv_queue_submit_add_fence_bo(queue, submit, point->bo, true); |
|   282 | result = anv_queue_execbuf_locked(queue, submit); |
|   310 | anv_queue_submit_deferred_locked(struct anv_queue *queue, uint32_t *advance) argument |
|   318 | &queue->queued_submits, link) { |
|   325 | result = anv_queue_submit_timeline_locked(queue, submit); |
|   327 | anv_queue_submit_free(queue… |
|   345 | struct anv_queue *queue = &device->queues[i]; local in function:anv_device_submit_deferred_locked |
|   370 | struct anv_queue *queue = _queue; local in function:anv_queue_task |
|   436 | anv_queue_submit_post(struct anv_queue * queue, struct anv_queue_submit ** _submit, bool flush_queue) argument |
|   475 | anv_queue_init(struct anv_device * device, struct anv_queue * queue, uint32_t exec_flags, const VkDeviceQueueCreateInfo * pCreateInfo, uint32_t index_in_family) argument |
|   530 | anv_queue_finish(struct anv_queue * queue) argument |
|   549 | anv_queue_submit_add_fence_bo(struct anv_queue * queue, struct anv_queue_submit * submit, struct anv_bo * bo, bool signal) argument |
|   576 | anv_queue_submit_add_syncobj(struct anv_queue * queue, struct anv_queue_submit * submit, uint32_t handle, uint32_t flags, uint64_t value) argument |
|   647 | anv_queue_submit_add_timeline_wait(struct anv_queue * queue, struct anv_queue_submit * submit, struct anv_timeline * timeline, uint64_t value) argument |
|   684 | anv_queue_submit_add_timeline_signal(struct anv_queue * queue, struct anv_queue_submit * submit, struct anv_timeline * timeline, uint64_t value) argument |
|   742 | anv_queue_submit_simple_batch(struct anv_queue * queue, struct anv_batch * batch) argument |
|   833 | add_temporary_semaphore(struct anv_queue * queue, struct anv_queue_submit * submit, struct anv_semaphore_impl * impl, struct anv_semaphore_impl ** out_impl) argument |
|   876 | clone_syncobj_dma_fence(struct anv_queue * queue, struct anv_semaphore_impl * out, const struct anv_semaphore_impl * in) argument |
|   919 | maybe_transfer_temporary_semaphore(struct anv_queue * queue, struct anv_queue_submit * submit, struct anv_semaphore * semaphore, struct anv_semaphore_impl ** out_impl) argument |
|   982 | anv_queue_submit_add_in_semaphore(struct anv_queue * queue, struct anv_queue_submit * submit, const VkSemaphore _semaphore, const uint64_t value) argument |
|  1068 | anv_queue_submit_add_out_semaphore(struct anv_queue * queue, struct anv_queue_submit * submit, const VkSemaphore _semaphore, const uint64_t value) argument |
|  1136 | anv_queue_submit_add_fence(struct anv_queue * queue, struct anv_queue_submit * submit, struct anv_fence * fence) argument |
|  1221 | anv_queue_submit_add_cmd_buffer(struct anv_queue * queue, struct anv_queue_submit * submit, struct anv_cmd_buffer * cmd_buffer, int perf_pass) argument |
|  1313 | anv_queue_submit_post_and_alloc_new(struct anv_queue * queue, struct anv_queue_submit ** submit) argument |
|   [more matches in this file not shown] |
| /xsrc/external/mit/MesaLib/dist/src/broadcom/vulkan/ |
| v3dv_queue.c |
|    90 | queue_submit_job(struct v3dv_queue *queue, |
|    99 | cpu_queue_wait_idle(struct v3dv_queue *queue) argument |
|   104 | mtx_lock(&queue->mutex); |
|   106 | &queue->submit_wait_list, list_link) { |
|   122 | mtx_unlock(&queue->mutex); |
|   129 | mtx_unlock(&queue->mutex); |
|   133 | gpu_queue_wait_idle(struct v3dv_queue *queue) argument |
|   135 | struct v3dv_device *device = queue->device; |
|   152 | V3DV_FROM_HANDLE(v3dv_queue, queue, _queue); |
|   157 | cpu_queue_wait_idle(queue); |
|   295 | wait_thread_finish(struct v3dv_queue * queue, pthread_t thread) argument |
|   329 | struct v3dv_queue *queue = &job->device->queue; local in function:event_wait_thread_func |
|   479 | handle_csd_indirect_cpu_job(struct v3dv_queue * queue, struct v3dv_job * job, bool do_sem_wait) argument |
|   581 | handle_cl_job(struct v3dv_queue * queue, struct v3dv_job * job, bool do_sem_wait) argument |
|   670 | handle_tfu_job(struct v3dv_queue * queue, struct v3dv_job * job, bool do_sem_wait) argument |
|   694 | handle_csd_job(struct v3dv_queue * queue, struct v3dv_job * job, bool do_sem_wait) argument |
|   738 | queue_submit_job(struct v3dv_queue * queue, struct v3dv_job * job, bool do_sem_wait, pthread_t * wait_thread) argument |
|   774 | queue_create_noop_job(struct v3dv_queue * queue) argument |
|   789 | queue_submit_noop_job(struct v3dv_queue * queue, const VkSubmitInfo * pSubmit) argument |
|   805 | queue_submit_cmd_buffer(struct v3dv_queue * queue, struct v3dv_cmd_buffer * cmd_buffer, const VkSubmitInfo * pSubmit, pthread_t * wait_thread) argument |
|   897 | queue_submit_cmd_buffer_batch(struct v3dv_queue * queue, const VkSubmitInfo * pSubmit, struct v3dv_queue_submit_wait_info ** wait_info) argument |
|   959 | struct v3dv_queue *queue = &wait_info->device->queue; local in function:master_wait_thread_func |
|   993 | spawn_master_wait_thread(struct v3dv_queue * queue, struct v3dv_queue_submit_wait_info * wait_info) argument |
|   [more matches in this file not shown] |
| /xsrc/external/mit/MesaLib/dist/src/panfrost/vulkan/ |
| panvk_vX_device.c |
|    37 | panvk_queue_submit_batch(struct panvk_queue *queue, argument |
|    43 | const struct panvk_device *dev = queue->device; |
|    75 | .out_sync = queue->sync, |
|    95 | .out_sync = queue->sync, |
|   101 | submit.in_syncs = (uintptr_t)(&queue->sync); |
|   126 | panvk_queue_transfer_sync(struct panvk_queue *queue, uint32_t syncobj) argument |
|   128 | const struct panfrost_device *pdev = &queue->device->physical_device->pdev; |
|   132 | .handle = queue->sync, |
|   169 | panvk_signal_event_syncobjs(struct panvk_queue *queue, struct panvk_batch *batch) argument |
|   171 | const struct panfrost_device *pdev = &queue… |
|   [more matches in this file not shown] |