Home | History | Annotate | Line # | Download | only in drm
drm_syncobj.c revision 1.2
      1 /*	$NetBSD: drm_syncobj.c,v 1.2 2021/12/18 23:44:57 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2017 Red Hat
      5  * Parts ported from amdgpu (fence wait code).
      6  * Copyright 2016 Advanced Micro Devices, Inc.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the "Software"),
     10  * to deal in the Software without restriction, including without limitation
     11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     12  * and/or sell copies of the Software, and to permit persons to whom the
     13  * Software is furnished to do so, subject to the following conditions:
     14  *
     15  * The above copyright notice and this permission notice (including the next
     16  * paragraph) shall be included in all copies or substantial portions of the
     17  * Software.
     18  *
     19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     22  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     23  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     24  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     25  * IN THE SOFTWARE.
     26  *
     27  * Authors:
     28  *
     29  */
     30 
     31 /**
     32  * DOC: Overview
     33  *
     34  * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
     35  * container for a synchronization primitive which can be used by userspace
     36  * to explicitly synchronize GPU commands, can be shared between userspace
     37  * processes, and can be shared between different DRM drivers.
     38  * Their primary use-case is to implement Vulkan fences and semaphores.
     39  * The syncobj userspace API provides ioctls for several operations:
     40  *
     41  *  - Creation and destruction of syncobjs
     42  *  - Import and export of syncobjs to/from a syncobj file descriptor
     43  *  - Import and export a syncobj's underlying fence to/from a sync file
     44  *  - Reset a syncobj (set its fence to NULL)
     45  *  - Signal a syncobj (set a trivially signaled fence)
     46  *  - Wait for a syncobj's fence to appear and be signaled
     47  *
      48  * At its core, a syncobj is simply a wrapper around a pointer to a struct
     49  * &dma_fence which may be NULL.
     50  * When a syncobj is first created, its pointer is either NULL or a pointer
     51  * to an already signaled fence depending on whether the
     52  * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
     53  * &DRM_IOCTL_SYNCOBJ_CREATE.
     54  * When GPU work which signals a syncobj is enqueued in a DRM driver,
     55  * the syncobj fence is replaced with a fence which will be signaled by the
     56  * completion of that work.
     57  * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
      58  * driver retrieves the syncobj's current fence at the time the work is
      59  * enqueued and waits on that fence before submitting the work to hardware.
     60  * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
     61  * All manipulation of the syncobjs's fence happens in terms of the current
     62  * fence at the time the ioctl is called by userspace regardless of whether
      63  * that operation is an immediate host-side operation (signal or reset) or
      64  * an operation which is enqueued in some driver queue.
     65  * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
     66  * manipulate a syncobj from the host by resetting its pointer to NULL or
     67  * setting its pointer to a fence which is already signaled.
     68  *
     69  *
     70  * Host-side wait on syncobjs
     71  * --------------------------
     72  *
     73  * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
     74  * host-side wait on all of the syncobj fences simultaneously.
     75  * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
     76  * all of the syncobj fences to be signaled before it returns.
     77  * Otherwise, it returns once at least one syncobj fence has been signaled
     78  * and the index of a signaled fence is written back to the client.
     79  *
     80  * Unlike the enqueued GPU work dependencies which fail if they see a NULL
     81  * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
     82  * the host-side wait will first wait for the syncobj to receive a non-NULL
     83  * fence and then wait on that fence.
     84  * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
     85  * syncobjs in the array has a NULL fence, -EINVAL will be returned.
     86  * Assuming the syncobj starts off with a NULL fence, this allows a client
     87  * to do a host wait in one thread (or process) which waits on GPU work
     88  * submitted in another thread (or process) without having to manually
     89  * synchronize between the two.
     90  * This requirement is inherited from the Vulkan fence API.
     91  *
     92  *
     93  * Import/export of syncobjs
     94  * -------------------------
     95  *
     96  * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
     97  * provide two mechanisms for import/export of syncobjs.
     98  *
     99  * The first lets the client import or export an entire syncobj to a file
    100  * descriptor.
    101  * These fd's are opaque and have no other use case, except passing the
    102  * syncobj between processes.
    103  * All exported file descriptors and any syncobj handles created as a
    104  * result of importing those file descriptors own a reference to the
    105  * same underlying struct &drm_syncobj and the syncobj can be used
    106  * persistently across all the processes with which it is shared.
    107  * The syncobj is freed only once the last reference is dropped.
    108  * Unlike dma-buf, importing a syncobj creates a new handle (with its own
    109  * reference) for every import instead of de-duplicating.
    110  * The primary use-case of this persistent import/export is for shared
    111  * Vulkan fences and semaphores.
    112  *
    113  * The second import/export mechanism, which is indicated by
    114  * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
    115  * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client
    116  * import/export the syncobj's current fence from/to a &sync_file.
    117  * When a syncobj is exported to a sync file, that sync file wraps the
     118  * syncobj's fence at the time of export and any later signal or reset
    119  * operations on the syncobj will not affect the exported sync file.
    120  * When a sync file is imported into a syncobj, the syncobj's fence is set
    121  * to the fence wrapped by that sync file.
    122  * Because sync files are immutable, resetting or signaling the syncobj
    123  * will not affect any sync files whose fences have been imported into the
    124  * syncobj.
    125  */
    126 
    127 #include <sys/cdefs.h>
    128 __KERNEL_RCSID(0, "$NetBSD: drm_syncobj.c,v 1.2 2021/12/18 23:44:57 riastradh Exp $");
    129 
    130 #include <linux/anon_inodes.h>
    131 #include <linux/file.h>
    132 #include <linux/fs.h>
    133 #include <linux/sched/signal.h>
    134 #include <linux/sync_file.h>
    135 #include <linux/uaccess.h>
    136 
    137 #include <drm/drm.h>
    138 #include <drm/drm_drv.h>
    139 #include <drm/drm_file.h>
    140 #include <drm/drm_gem.h>
    141 #include <drm/drm_print.h>
    142 #include <drm/drm_syncobj.h>
    143 #include <drm/drm_utils.h>
    144 
    145 #include "drm_internal.h"
    146 
    147 struct syncobj_wait_entry {
    148 	struct list_head node;
    149 	struct task_struct *task;
    150 	struct dma_fence *fence;
    151 	struct dma_fence_cb fence_cb;
    152 	u64    point;
    153 };
    154 
    155 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
    156 				      struct syncobj_wait_entry *wait);
    157 
    158 /**
    159  * drm_syncobj_find - lookup and reference a sync object.
    160  * @file_private: drm file private pointer
    161  * @handle: sync object handle to lookup.
    162  *
    163  * Returns a reference to the syncobj pointed to by handle or NULL. The
    164  * reference must be released by calling drm_syncobj_put().
    165  */
    166 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
    167 				     u32 handle)
    168 {
    169 	struct drm_syncobj *syncobj;
    170 
    171 	spin_lock(&file_private->syncobj_table_lock);
    172 
    173 	/* Check if we currently have a reference on the object */
    174 	syncobj = idr_find(&file_private->syncobj_idr, handle);
    175 	if (syncobj)
    176 		drm_syncobj_get(syncobj);
    177 
    178 	spin_unlock(&file_private->syncobj_table_lock);
    179 
    180 	return syncobj;
    181 }
    182 EXPORT_SYMBOL(drm_syncobj_find);
    183 
    184 static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
    185 				       struct syncobj_wait_entry *wait)
    186 {
    187 	struct dma_fence *fence;
    188 
    189 	if (wait->fence)
    190 		return;
    191 
    192 	spin_lock(&syncobj->lock);
    193 	/* We've already tried once to get a fence and failed.  Now that we
    194 	 * have the lock, try one more time just to be sure we don't add a
    195 	 * callback when a fence has already been set.
    196 	 */
    197 	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
    198 	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
    199 		dma_fence_put(fence);
    200 		list_add_tail(&wait->node, &syncobj->cb_list);
    201 	} else if (!fence) {
    202 		wait->fence = dma_fence_get_stub();
    203 	} else {
    204 		wait->fence = fence;
    205 	}
    206 	spin_unlock(&syncobj->lock);
    207 }
    208 
    209 static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
    210 				    struct syncobj_wait_entry *wait)
    211 {
    212 	if (!wait->node.next)
    213 		return;
    214 
    215 	spin_lock(&syncobj->lock);
    216 	list_del_init(&wait->node);
    217 	spin_unlock(&syncobj->lock);
    218 }
    219 
    220 /**
    221  * drm_syncobj_add_point - add new timeline point to the syncobj
    222  * @syncobj: sync object to add timeline point do
    223  * @chain: chain node to use to add the point
    224  * @fence: fence to encapsulate in the chain node
    225  * @point: sequence number to use for the point
    226  *
    227  * Add the chain node as new timeline point to the syncobj.
    228  */
    229 void drm_syncobj_add_point(struct drm_syncobj *syncobj,
    230 			   struct dma_fence_chain *chain,
    231 			   struct dma_fence *fence,
    232 			   uint64_t point)
    233 {
    234 	struct syncobj_wait_entry *cur, *tmp;
    235 	struct dma_fence *prev;
    236 
    237 	dma_fence_get(fence);
    238 
    239 	spin_lock(&syncobj->lock);
    240 
    241 	prev = drm_syncobj_fence_get(syncobj);
    242 	/* You are adding an unorder point to timeline, which could cause payload returned from query_ioctl is 0! */
    243 	if (prev && prev->seqno >= point)
    244 		DRM_ERROR("You are adding an unorder point to timeline!\n");
    245 	dma_fence_chain_init(chain, prev, fence, point);
    246 	rcu_assign_pointer(syncobj->fence, &chain->base);
    247 
    248 	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
    249 		syncobj_wait_syncobj_func(syncobj, cur);
    250 	spin_unlock(&syncobj->lock);
    251 
    252 	/* Walk the chain once to trigger garbage collection */
    253 	dma_fence_chain_for_each(fence, prev);
    254 	dma_fence_put(prev);
    255 }
    256 EXPORT_SYMBOL(drm_syncobj_add_point);
    257 
    258 /**
    259  * drm_syncobj_replace_fence - replace fence in a sync object.
    260  * @syncobj: Sync object to replace fence in
    261  * @fence: fence to install in sync file.
    262  *
    263  * This replaces the fence on a sync object.
    264  */
    265 void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
    266 			       struct dma_fence *fence)
    267 {
    268 	struct dma_fence *old_fence;
    269 	struct syncobj_wait_entry *cur, *tmp;
    270 
    271 	if (fence)
    272 		dma_fence_get(fence);
    273 
    274 	spin_lock(&syncobj->lock);
    275 
    276 	old_fence = rcu_dereference_protected(syncobj->fence,
    277 					      lockdep_is_held(&syncobj->lock));
    278 	rcu_assign_pointer(syncobj->fence, fence);
    279 
    280 	if (fence != old_fence) {
    281 		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
    282 			syncobj_wait_syncobj_func(syncobj, cur);
    283 	}
    284 
    285 	spin_unlock(&syncobj->lock);
    286 
    287 	dma_fence_put(old_fence);
    288 }
    289 EXPORT_SYMBOL(drm_syncobj_replace_fence);
    290 
    291 /**
    292  * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
    293  * @syncobj: sync object to assign the fence on
    294  *
    295  * Assign a already signaled stub fence to the sync object.
    296  */
    297 static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
    298 {
    299 	struct dma_fence *fence = dma_fence_get_stub();
    300 
    301 	drm_syncobj_replace_fence(syncobj, fence);
    302 	dma_fence_put(fence);
    303 }
    304 
    305 /* 5s default for wait submission */
    306 #define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
    307 /**
    308  * drm_syncobj_find_fence - lookup and reference the fence in a sync object
    309  * @file_private: drm file private pointer
    310  * @handle: sync object handle to lookup.
    311  * @point: timeline point
    312  * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
    313  * @fence: out parameter for the fence
    314  *
    315  * This is just a convenience function that combines drm_syncobj_find() and
    316  * drm_syncobj_fence_get().
    317  *
    318  * Returns 0 on success or a negative error value on failure. On success @fence
    319  * contains a reference to the fence, which must be released by calling
    320  * dma_fence_put().
    321  */
    322 int drm_syncobj_find_fence(struct drm_file *file_private,
    323 			   u32 handle, u64 point, u64 flags,
    324 			   struct dma_fence **fence)
    325 {
    326 	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
    327 	struct syncobj_wait_entry wait;
    328 	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
    329 	int ret;
    330 
    331 	if (!syncobj)
    332 		return -ENOENT;
    333 
    334 	*fence = drm_syncobj_fence_get(syncobj);
    335 	drm_syncobj_put(syncobj);
    336 
    337 	if (*fence) {
    338 		ret = dma_fence_chain_find_seqno(fence, point);
    339 		if (!ret)
    340 			return 0;
    341 		dma_fence_put(*fence);
    342 	} else {
    343 		ret = -EINVAL;
    344 	}
    345 
    346 	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
    347 		return ret;
    348 
    349 	memset(&wait, 0, sizeof(wait));
    350 	wait.task = current;
    351 	wait.point = point;
    352 	drm_syncobj_fence_add_wait(syncobj, &wait);
    353 
    354 	do {
    355 		set_current_state(TASK_INTERRUPTIBLE);
    356 		if (wait.fence) {
    357 			ret = 0;
    358 			break;
    359 		}
    360                 if (timeout == 0) {
    361                         ret = -ETIME;
    362                         break;
    363                 }
    364 
    365 		if (signal_pending(current)) {
    366 			ret = -ERESTARTSYS;
    367 			break;
    368 		}
    369 
    370                 timeout = schedule_timeout(timeout);
    371 	} while (1);
    372 
    373 	__set_current_state(TASK_RUNNING);
    374 	*fence = wait.fence;
    375 
    376 	if (wait.node.next)
    377 		drm_syncobj_remove_wait(syncobj, &wait);
    378 
    379 	return ret;
    380 }
    381 EXPORT_SYMBOL(drm_syncobj_find_fence);
    382 
    383 /**
    384  * drm_syncobj_free - free a sync object.
    385  * @kref: kref to free.
    386  *
    387  * Only to be called from kref_put in drm_syncobj_put.
    388  */
    389 void drm_syncobj_free(struct kref *kref)
    390 {
    391 	struct drm_syncobj *syncobj = container_of(kref,
    392 						   struct drm_syncobj,
    393 						   refcount);
    394 	drm_syncobj_replace_fence(syncobj, NULL);
    395 	kfree(syncobj);
    396 }
    397 EXPORT_SYMBOL(drm_syncobj_free);
    398 
    399 /**
    400  * drm_syncobj_create - create a new syncobj
    401  * @out_syncobj: returned syncobj
    402  * @flags: DRM_SYNCOBJ_* flags
    403  * @fence: if non-NULL, the syncobj will represent this fence
    404  *
    405  * This is the first function to create a sync object. After creating, drivers
    406  * probably want to make it available to userspace, either through
    407  * drm_syncobj_get_handle() or drm_syncobj_get_fd().
    408  *
    409  * Returns 0 on success or a negative error value on failure.
    410  */
    411 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
    412 		       struct dma_fence *fence)
    413 {
    414 	struct drm_syncobj *syncobj;
    415 
    416 	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
    417 	if (!syncobj)
    418 		return -ENOMEM;
    419 
    420 	kref_init(&syncobj->refcount);
    421 	INIT_LIST_HEAD(&syncobj->cb_list);
    422 	spin_lock_init(&syncobj->lock);
    423 
    424 	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
    425 		drm_syncobj_assign_null_handle(syncobj);
    426 
    427 	if (fence)
    428 		drm_syncobj_replace_fence(syncobj, fence);
    429 
    430 	*out_syncobj = syncobj;
    431 	return 0;
    432 }
    433 EXPORT_SYMBOL(drm_syncobj_create);
    434 
    435 /**
    436  * drm_syncobj_get_handle - get a handle from a syncobj
    437  * @file_private: drm file private pointer
    438  * @syncobj: Sync object to export
    439  * @handle: out parameter with the new handle
    440  *
    441  * Exports a sync object created with drm_syncobj_create() as a handle on
    442  * @file_private to userspace.
    443  *
    444  * Returns 0 on success or a negative error value on failure.
    445  */
    446 int drm_syncobj_get_handle(struct drm_file *file_private,
    447 			   struct drm_syncobj *syncobj, u32 *handle)
    448 {
    449 	int ret;
    450 
    451 	/* take a reference to put in the idr */
    452 	drm_syncobj_get(syncobj);
    453 
    454 	idr_preload(GFP_KERNEL);
    455 	spin_lock(&file_private->syncobj_table_lock);
    456 	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
    457 	spin_unlock(&file_private->syncobj_table_lock);
    458 
    459 	idr_preload_end();
    460 
    461 	if (ret < 0) {
    462 		drm_syncobj_put(syncobj);
    463 		return ret;
    464 	}
    465 
    466 	*handle = ret;
    467 	return 0;
    468 }
    469 EXPORT_SYMBOL(drm_syncobj_get_handle);
    470 
    471 static int drm_syncobj_create_as_handle(struct drm_file *file_private,
    472 					u32 *handle, uint32_t flags)
    473 {
    474 	int ret;
    475 	struct drm_syncobj *syncobj;
    476 
    477 	ret = drm_syncobj_create(&syncobj, flags, NULL);
    478 	if (ret)
    479 		return ret;
    480 
    481 	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
    482 	drm_syncobj_put(syncobj);
    483 	return ret;
    484 }
    485 
    486 static int drm_syncobj_destroy(struct drm_file *file_private,
    487 			       u32 handle)
    488 {
    489 	struct drm_syncobj *syncobj;
    490 
    491 	spin_lock(&file_private->syncobj_table_lock);
    492 	syncobj = idr_remove(&file_private->syncobj_idr, handle);
    493 	spin_unlock(&file_private->syncobj_table_lock);
    494 
    495 	if (!syncobj)
    496 		return -EINVAL;
    497 
    498 	drm_syncobj_put(syncobj);
    499 	return 0;
    500 }
    501 
    502 static int drm_syncobj_file_release(struct inode *inode, struct file *file)
    503 {
    504 	struct drm_syncobj *syncobj = file->private_data;
    505 
    506 	drm_syncobj_put(syncobj);
    507 	return 0;
    508 }
    509 
    510 static const struct file_operations drm_syncobj_file_fops = {
    511 	.release = drm_syncobj_file_release,
    512 };
    513 
    514 /**
    515  * drm_syncobj_get_fd - get a file descriptor from a syncobj
    516  * @syncobj: Sync object to export
    517  * @p_fd: out parameter with the new file descriptor
    518  *
    519  * Exports a sync object created with drm_syncobj_create() as a file descriptor.
    520  *
    521  * Returns 0 on success or a negative error value on failure.
    522  */
    523 int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
    524 {
    525 	struct file *file;
    526 	int fd;
    527 
    528 	fd = get_unused_fd_flags(O_CLOEXEC);
    529 	if (fd < 0)
    530 		return fd;
    531 
    532 	file = anon_inode_getfile("syncobj_file",
    533 				  &drm_syncobj_file_fops,
    534 				  syncobj, 0);
    535 	if (IS_ERR(file)) {
    536 		put_unused_fd(fd);
    537 		return PTR_ERR(file);
    538 	}
    539 
    540 	drm_syncobj_get(syncobj);
    541 	fd_install(fd, file);
    542 
    543 	*p_fd = fd;
    544 	return 0;
    545 }
    546 EXPORT_SYMBOL(drm_syncobj_get_fd);
    547 
    548 static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
    549 				    u32 handle, int *p_fd)
    550 {
    551 	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
    552 	int ret;
    553 
    554 	if (!syncobj)
    555 		return -EINVAL;
    556 
    557 	ret = drm_syncobj_get_fd(syncobj, p_fd);
    558 	drm_syncobj_put(syncobj);
    559 	return ret;
    560 }
    561 
    562 static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
    563 				    int fd, u32 *handle)
    564 {
    565 	struct drm_syncobj *syncobj;
    566 	struct fd f = fdget(fd);
    567 	int ret;
    568 
    569 	if (!f.file)
    570 		return -EINVAL;
    571 
    572 	if (f.file->f_op != &drm_syncobj_file_fops) {
    573 		fdput(f);
    574 		return -EINVAL;
    575 	}
    576 
    577 	/* take a reference to put in the idr */
    578 	syncobj = f.file->private_data;
    579 	drm_syncobj_get(syncobj);
    580 
    581 	idr_preload(GFP_KERNEL);
    582 	spin_lock(&file_private->syncobj_table_lock);
    583 	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
    584 	spin_unlock(&file_private->syncobj_table_lock);
    585 	idr_preload_end();
    586 
    587 	if (ret > 0) {
    588 		*handle = ret;
    589 		ret = 0;
    590 	} else
    591 		drm_syncobj_put(syncobj);
    592 
    593 	fdput(f);
    594 	return ret;
    595 }
    596 
    597 static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
    598 					      int fd, int handle)
    599 {
    600 	struct dma_fence *fence = sync_file_get_fence(fd);
    601 	struct drm_syncobj *syncobj;
    602 
    603 	if (!fence)
    604 		return -EINVAL;
    605 
    606 	syncobj = drm_syncobj_find(file_private, handle);
    607 	if (!syncobj) {
    608 		dma_fence_put(fence);
    609 		return -ENOENT;
    610 	}
    611 
    612 	drm_syncobj_replace_fence(syncobj, fence);
    613 	dma_fence_put(fence);
    614 	drm_syncobj_put(syncobj);
    615 	return 0;
    616 }
    617 
    618 static int drm_syncobj_export_sync_file(struct drm_file *file_private,
    619 					int handle, int *p_fd)
    620 {
    621 	int ret;
    622 	struct dma_fence *fence;
    623 	struct sync_file *sync_file;
    624 	int fd = get_unused_fd_flags(O_CLOEXEC);
    625 
    626 	if (fd < 0)
    627 		return fd;
    628 
    629 	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
    630 	if (ret)
    631 		goto err_put_fd;
    632 
    633 	sync_file = sync_file_create(fence);
    634 
    635 	dma_fence_put(fence);
    636 
    637 	if (!sync_file) {
    638 		ret = -EINVAL;
    639 		goto err_put_fd;
    640 	}
    641 
    642 	fd_install(fd, sync_file->file);
    643 
    644 	*p_fd = fd;
    645 	return 0;
    646 err_put_fd:
    647 	put_unused_fd(fd);
    648 	return ret;
    649 }
    650 /**
    651  * drm_syncobj_open - initalizes syncobj file-private structures at devnode open time
    652  * @file_private: drm file-private structure to set up
    653  *
    654  * Called at device open time, sets up the structure for handling refcounting
    655  * of sync objects.
    656  */
    657 void
    658 drm_syncobj_open(struct drm_file *file_private)
    659 {
    660 	idr_init_base(&file_private->syncobj_idr, 1);
    661 	spin_lock_init(&file_private->syncobj_table_lock);
    662 }
    663 
    664 static int
    665 drm_syncobj_release_handle(int id, void *ptr, void *data)
    666 {
    667 	struct drm_syncobj *syncobj = ptr;
    668 
    669 	drm_syncobj_put(syncobj);
    670 	return 0;
    671 }
    672 
    673 /**
    674  * drm_syncobj_release - release file-private sync object resources
    675  * @file_private: drm file-private structure to clean up
    676  *
    677  * Called at close time when the filp is going away.
    678  *
    679  * Releases any remaining references on objects by this filp.
    680  */
    681 void
    682 drm_syncobj_release(struct drm_file *file_private)
    683 {
    684 	idr_for_each(&file_private->syncobj_idr,
    685 		     &drm_syncobj_release_handle, file_private);
    686 	idr_destroy(&file_private->syncobj_idr);
    687 }
    688 
    689 int
    690 drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
    691 			 struct drm_file *file_private)
    692 {
    693 	struct drm_syncobj_create *args = data;
    694 
    695 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
    696 		return -EOPNOTSUPP;
    697 
    698 	/* no valid flags yet */
    699 	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
    700 		return -EINVAL;
    701 
    702 	return drm_syncobj_create_as_handle(file_private,
    703 					    &args->handle, args->flags);
    704 }
    705 
    706 int
    707 drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
    708 			  struct drm_file *file_private)
    709 {
    710 	struct drm_syncobj_destroy *args = data;
    711 
    712 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
    713 		return -EOPNOTSUPP;
    714 
    715 	/* make sure padding is empty */
    716 	if (args->pad)
    717 		return -EINVAL;
    718 	return drm_syncobj_destroy(file_private, args->handle);
    719 }
    720 
    721 int
    722 drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
    723 				   struct drm_file *file_private)
    724 {
    725 	struct drm_syncobj_handle *args = data;
    726 
    727 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
    728 		return -EOPNOTSUPP;
    729 
    730 	if (args->pad)
    731 		return -EINVAL;
    732 
    733 	if (args->flags != 0 &&
    734 	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
    735 		return -EINVAL;
    736 
    737 	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
    738 		return drm_syncobj_export_sync_file(file_private, args->handle,
    739 						    &args->fd);
    740 
    741 	return drm_syncobj_handle_to_fd(file_private, args->handle,
    742 					&args->fd);
    743 }
    744 
    745 int
    746 drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
    747 				   struct drm_file *file_private)
    748 {
    749 	struct drm_syncobj_handle *args = data;
    750 
    751 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
    752 		return -EOPNOTSUPP;
    753 
    754 	if (args->pad)
    755 		return -EINVAL;
    756 
    757 	if (args->flags != 0 &&
    758 	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
    759 		return -EINVAL;
    760 
    761 	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
    762 		return drm_syncobj_import_sync_file_fence(file_private,
    763 							  args->fd,
    764 							  args->handle);
    765 
    766 	return drm_syncobj_fd_to_handle(file_private, args->fd,
    767 					&args->handle);
    768 }
    769 
    770 static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
    771 					    struct drm_syncobj_transfer *args)
    772 {
    773 	struct drm_syncobj *timeline_syncobj = NULL;
    774 	struct dma_fence *fence;
    775 	struct dma_fence_chain *chain;
    776 	int ret;
    777 
    778 	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
    779 	if (!timeline_syncobj) {
    780 		return -ENOENT;
    781 	}
    782 	ret = drm_syncobj_find_fence(file_private, args->src_handle,
    783 				     args->src_point, args->flags,
    784 				     &fence);
    785 	if (ret)
    786 		goto err;
    787 	chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
    788 	if (!chain) {
    789 		ret = -ENOMEM;
    790 		goto err1;
    791 	}
    792 	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
    793 err1:
    794 	dma_fence_put(fence);
    795 err:
    796 	drm_syncobj_put(timeline_syncobj);
    797 
    798 	return ret;
    799 }
    800 
    801 static int
    802 drm_syncobj_transfer_to_binary(struct drm_file *file_private,
    803 			       struct drm_syncobj_transfer *args)
    804 {
    805 	struct drm_syncobj *binary_syncobj = NULL;
    806 	struct dma_fence *fence;
    807 	int ret;
    808 
    809 	binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
    810 	if (!binary_syncobj)
    811 		return -ENOENT;
    812 	ret = drm_syncobj_find_fence(file_private, args->src_handle,
    813 				     args->src_point, args->flags, &fence);
    814 	if (ret)
    815 		goto err;
    816 	drm_syncobj_replace_fence(binary_syncobj, fence);
    817 	dma_fence_put(fence);
    818 err:
    819 	drm_syncobj_put(binary_syncobj);
    820 
    821 	return ret;
    822 }
    823 int
    824 drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
    825 			   struct drm_file *file_private)
    826 {
    827 	struct drm_syncobj_transfer *args = data;
    828 	int ret;
    829 
    830 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
    831 		return -EOPNOTSUPP;
    832 
    833 	if (args->pad)
    834 		return -EINVAL;
    835 
    836 	if (args->dst_point)
    837 		ret = drm_syncobj_transfer_to_timeline(file_private, args);
    838 	else
    839 		ret = drm_syncobj_transfer_to_binary(file_private, args);
    840 
    841 	return ret;
    842 }
    843 
    844 static void syncobj_wait_fence_func(struct dma_fence *fence,
    845 				    struct dma_fence_cb *cb)
    846 {
    847 	struct syncobj_wait_entry *wait =
    848 		container_of(cb, struct syncobj_wait_entry, fence_cb);
    849 
    850 	wake_up_process(wait->task);
    851 }
    852 
    853 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
    854 				      struct syncobj_wait_entry *wait)
    855 {
    856 	struct dma_fence *fence;
    857 
    858 	/* This happens inside the syncobj lock */
    859 	fence = rcu_dereference_protected(syncobj->fence,
    860 					  lockdep_is_held(&syncobj->lock));
    861 	dma_fence_get(fence);
    862 	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
    863 		dma_fence_put(fence);
    864 		return;
    865 	} else if (!fence) {
    866 		wait->fence = dma_fence_get_stub();
    867 	} else {
    868 		wait->fence = fence;
    869 	}
    870 
    871 	wake_up_process(wait->task);
    872 	list_del_init(&wait->node);
    873 }
    874 
/*
 * Wait for fences from an array of syncobjs.
 *
 * @syncobjs:    array of @count syncobjs (references held by the caller)
 * @user_points: optional user pointer to @count u64 timeline points;
 *               NULL means binary wait (all points treated as 0)
 * @count:       number of syncobjs/points
 * @flags:       DRM_SYNCOBJ_WAIT_FLAGS_* modifiers
 * @timeout:     timeout in jiffies; 0 means poll
 * @idx:         out-parameter for the index of the first signaled entry
 *
 * Returns the remaining timeout in jiffies on success, or a negative
 * errno (-ENOMEM, -EFAULT, -EINVAL, -ETIME, -ERESTARTSYS).
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  void __user *user_points,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint64_t *points;
	uint32_t signaled_count, i;

	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
	if (points == NULL)
		return -ENOMEM;

	if (!user_points) {
		/* Binary wait: every entry waits on point 0. */
		memset(points, 0, count * sizeof(uint64_t));

	} else if (copy_from_user(points, user_points,
				  sizeof(uint64_t) * count)) {
		timeout = -EFAULT;
		goto err_free_points;
	}

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		timeout = -ENOMEM;
		goto err_free_points;
	}
	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		/* NOTE: shadows the outer 'fence' used by the wait loop. */
		struct dma_fence *fence;

		entries[i].task = current;
		entries[i].point = points[i];
		fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
			dma_fence_put(fence);
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				/* Fence may still arrive; wait for submit. */
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		/* A NULL fence after find_seqno means the point is already
		 * signaled; substitute the always-signaled stub fence.
		 */
		if (fence)
			entries[i].fence = fence;
		else
			entries[i].fence = dma_fence_get_stub();

		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
		    dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Fast path: already satisfied without sleeping. */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fallthough and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		/* Register for wakeups when fences get attached later. */
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	do {
		/* Must go non-runnable BEFORE re-checking, so a wakeup
		 * between check and schedule_timeout() is not lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() returning non-zero means
			 * the fence was already signaled; either way this
			 * entry counts as done.  The callback is installed
			 * at most once per entry (fence_cb.func check).
			 */
			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
			    dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	/* Unhook submit-waiters and fence callbacks, drop fence refs. */
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

err_free_points:
	kfree(points);

	return timeout;
}
   1017 
   1018 /**
   1019  * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
   1020  *
   1021  * @timeout_nsec: timeout nsec component in ns, 0 for poll
   1022  *
   1023  * Calculate the timeout in jiffies from an absolute time in sec/nsec.
   1024  */
   1025 signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
   1026 {
   1027 	ktime_t abs_timeout, now;
   1028 	u64 timeout_ns, timeout_jiffies64;
   1029 
   1030 	/* make 0 timeout means poll - absolute 0 doesn't seem valid */
   1031 	if (timeout_nsec == 0)
   1032 		return 0;
   1033 
   1034 	abs_timeout = ns_to_ktime(timeout_nsec);
   1035 	now = ktime_get();
   1036 
   1037 	if (!ktime_after(abs_timeout, now))
   1038 		return 0;
   1039 
   1040 	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
   1041 
   1042 	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
   1043 	/*  clamp timeout to avoid infinite timeout */
   1044 	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
   1045 		return MAX_SCHEDULE_TIMEOUT - 1;
   1046 
   1047 	return timeout_jiffies64 + 1;
   1048 }
   1049 EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
   1050 
   1051 static int drm_syncobj_array_wait(struct drm_device *dev,
   1052 				  struct drm_file *file_private,
   1053 				  struct drm_syncobj_wait *wait,
   1054 				  struct drm_syncobj_timeline_wait *timeline_wait,
   1055 				  struct drm_syncobj **syncobjs, bool timeline)
   1056 {
   1057 	signed long timeout = 0;
   1058 	uint32_t first = ~0;
   1059 
   1060 	if (!timeline) {
   1061 		timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
   1062 		timeout = drm_syncobj_array_wait_timeout(syncobjs,
   1063 							 NULL,
   1064 							 wait->count_handles,
   1065 							 wait->flags,
   1066 							 timeout, &first);
   1067 		if (timeout < 0)
   1068 			return timeout;
   1069 		wait->first_signaled = first;
   1070 	} else {
   1071 		timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
   1072 		timeout = drm_syncobj_array_wait_timeout(syncobjs,
   1073 							 u64_to_user_ptr(timeline_wait->points),
   1074 							 timeline_wait->count_handles,
   1075 							 timeline_wait->flags,
   1076 							 timeout, &first);
   1077 		if (timeout < 0)
   1078 			return timeout;
   1079 		timeline_wait->first_signaled = first;
   1080 	}
   1081 	return 0;
   1082 }
   1083 
   1084 static int drm_syncobj_array_find(struct drm_file *file_private,
   1085 				  void __user *user_handles,
   1086 				  uint32_t count_handles,
   1087 				  struct drm_syncobj ***syncobjs_out)
   1088 {
   1089 	uint32_t i, *handles;
   1090 	struct drm_syncobj **syncobjs;
   1091 	int ret;
   1092 
   1093 	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
   1094 	if (handles == NULL)
   1095 		return -ENOMEM;
   1096 
   1097 	if (copy_from_user(handles, user_handles,
   1098 			   sizeof(uint32_t) * count_handles)) {
   1099 		ret = -EFAULT;
   1100 		goto err_free_handles;
   1101 	}
   1102 
   1103 	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
   1104 	if (syncobjs == NULL) {
   1105 		ret = -ENOMEM;
   1106 		goto err_free_handles;
   1107 	}
   1108 
   1109 	for (i = 0; i < count_handles; i++) {
   1110 		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
   1111 		if (!syncobjs[i]) {
   1112 			ret = -ENOENT;
   1113 			goto err_put_syncobjs;
   1114 		}
   1115 	}
   1116 
   1117 	kfree(handles);
   1118 	*syncobjs_out = syncobjs;
   1119 	return 0;
   1120 
   1121 err_put_syncobjs:
   1122 	while (i-- > 0)
   1123 		drm_syncobj_put(syncobjs[i]);
   1124 	kfree(syncobjs);
   1125 err_free_handles:
   1126 	kfree(handles);
   1127 
   1128 	return ret;
   1129 }
   1130 
   1131 static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
   1132 				   uint32_t count)
   1133 {
   1134 	uint32_t i;
   1135 	for (i = 0; i < count; i++)
   1136 		drm_syncobj_put(syncobjs[i]);
   1137 	kfree(syncobjs);
   1138 }
   1139 
   1140 int
   1141 drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
   1142 		       struct drm_file *file_private)
   1143 {
   1144 	struct drm_syncobj_wait *args = data;
   1145 	struct drm_syncobj **syncobjs;
   1146 	int ret = 0;
   1147 
   1148 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
   1149 		return -EOPNOTSUPP;
   1150 
   1151 	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
   1152 			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
   1153 		return -EINVAL;
   1154 
   1155 	if (args->count_handles == 0)
   1156 		return -EINVAL;
   1157 
   1158 	ret = drm_syncobj_array_find(file_private,
   1159 				     u64_to_user_ptr(args->handles),
   1160 				     args->count_handles,
   1161 				     &syncobjs);
   1162 	if (ret < 0)
   1163 		return ret;
   1164 
   1165 	ret = drm_syncobj_array_wait(dev, file_private,
   1166 				     args, NULL, syncobjs, false);
   1167 
   1168 	drm_syncobj_array_free(syncobjs, args->count_handles);
   1169 
   1170 	return ret;
   1171 }
   1172 
   1173 int
   1174 drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
   1175 				struct drm_file *file_private)
   1176 {
   1177 	struct drm_syncobj_timeline_wait *args = data;
   1178 	struct drm_syncobj **syncobjs;
   1179 	int ret = 0;
   1180 
   1181 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
   1182 		return -EOPNOTSUPP;
   1183 
   1184 	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
   1185 			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
   1186 			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
   1187 		return -EINVAL;
   1188 
   1189 	if (args->count_handles == 0)
   1190 		return -EINVAL;
   1191 
   1192 	ret = drm_syncobj_array_find(file_private,
   1193 				     u64_to_user_ptr(args->handles),
   1194 				     args->count_handles,
   1195 				     &syncobjs);
   1196 	if (ret < 0)
   1197 		return ret;
   1198 
   1199 	ret = drm_syncobj_array_wait(dev, file_private,
   1200 				     NULL, args, syncobjs, true);
   1201 
   1202 	drm_syncobj_array_free(syncobjs, args->count_handles);
   1203 
   1204 	return ret;
   1205 }
   1206 
   1207 
   1208 int
   1209 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
   1210 			struct drm_file *file_private)
   1211 {
   1212 	struct drm_syncobj_array *args = data;
   1213 	struct drm_syncobj **syncobjs;
   1214 	uint32_t i;
   1215 	int ret;
   1216 
   1217 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
   1218 		return -EOPNOTSUPP;
   1219 
   1220 	if (args->pad != 0)
   1221 		return -EINVAL;
   1222 
   1223 	if (args->count_handles == 0)
   1224 		return -EINVAL;
   1225 
   1226 	ret = drm_syncobj_array_find(file_private,
   1227 				     u64_to_user_ptr(args->handles),
   1228 				     args->count_handles,
   1229 				     &syncobjs);
   1230 	if (ret < 0)
   1231 		return ret;
   1232 
   1233 	for (i = 0; i < args->count_handles; i++)
   1234 		drm_syncobj_replace_fence(syncobjs[i], NULL);
   1235 
   1236 	drm_syncobj_array_free(syncobjs, args->count_handles);
   1237 
   1238 	return 0;
   1239 }
   1240 
   1241 int
   1242 drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
   1243 			 struct drm_file *file_private)
   1244 {
   1245 	struct drm_syncobj_array *args = data;
   1246 	struct drm_syncobj **syncobjs;
   1247 	uint32_t i;
   1248 	int ret;
   1249 
   1250 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
   1251 		return -EOPNOTSUPP;
   1252 
   1253 	if (args->pad != 0)
   1254 		return -EINVAL;
   1255 
   1256 	if (args->count_handles == 0)
   1257 		return -EINVAL;
   1258 
   1259 	ret = drm_syncobj_array_find(file_private,
   1260 				     u64_to_user_ptr(args->handles),
   1261 				     args->count_handles,
   1262 				     &syncobjs);
   1263 	if (ret < 0)
   1264 		return ret;
   1265 
   1266 	for (i = 0; i < args->count_handles; i++)
   1267 		drm_syncobj_assign_null_handle(syncobjs[i]);
   1268 
   1269 	drm_syncobj_array_free(syncobjs, args->count_handles);
   1270 
   1271 	return ret;
   1272 }
   1273 
int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	struct dma_fence_chain **chains;
	uint64_t *points;
	uint32_t i, j;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	/* No flags are defined for this ioctl. */
	if (args->flags != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	points = kmalloc_array(args->count_handles, sizeof(*points),
			       GFP_KERNEL);
	if (!points) {
		ret = -ENOMEM;
		goto out;
	}
	/* A NULL points pointer means "signal point 0" on every handle. */
	if (!u64_to_user_ptr(args->points)) {
		memset(points, 0, args->count_handles * sizeof(uint64_t));
	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
				  sizeof(uint64_t) * args->count_handles)) {
		ret = -EFAULT;
		goto err_points;
	}

	/* Pre-allocate one chain node per handle so the signaling loop
	 * below cannot fail partway through.
	 */
	chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
	if (!chains) {
		ret = -ENOMEM;
		goto err_points;
	}
	for (i = 0; i < args->count_handles; i++) {
		chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
		if (!chains[i]) {
			/* Free the chain nodes allocated before this one. */
			for (j = 0; j < i; j++)
				kfree(chains[j]);
			ret = -ENOMEM;
			goto err_chains;
		}
	}

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence *fence = dma_fence_get_stub();

		/* drm_syncobj_add_point() takes ownership of chains[i];
		 * only the pointer array itself is freed below.
		 */
		drm_syncobj_add_point(syncobjs[i], chains[i],
				      fence, points[i]);
		dma_fence_put(fence);
	}
err_chains:
	kfree(chains);
err_points:
	kfree(points);
out:
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
   1346 
/*
 * Report the current (or last-submitted) timeline point of each syncobj
 * in the array back to userspace via args->points.
 */
int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	uint64_t __user *points = u64_to_user_ptr(args->points);
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence_chain *chain;
		struct dma_fence *fence;
		uint64_t point;

		fence = drm_syncobj_fence_get(syncobjs[i]);
		chain = to_dma_fence_chain(fence);
		if (chain) {
			struct dma_fence *iter, *last_signaled =
				dma_fence_get(fence);

			if (args->flags &
			    DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
				/* Head of the chain carries the newest
				 * submitted point.
				 */
				point = fence->seqno;
			} else {
				/* Walk toward older links to find the last
				 * contiguously-signaled point.
				 */
				dma_fence_chain_for_each(iter, fence) {
					if (iter->context != fence->context) {
						dma_fence_put(iter);
						/* It is most likely that the timeline
						* has unordered points. */
						break;
					}
					dma_fence_put(last_signaled);
					last_signaled = dma_fence_get(iter);
				}
				point = dma_fence_is_signaled(last_signaled) ?
					last_signaled->seqno :
					to_dma_fence_chain(last_signaled)->prev_seqno;
			}
			dma_fence_put(last_signaled);
		} else {
			/* No timeline chain attached: report point 0. */
			point = 0;
		}
		dma_fence_put(fence);
		/* copy_to_user returns bytes NOT copied; map to -EFAULT. */
		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
		ret = ret ? -EFAULT : 0;
		if (ret)
			break;
	}
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
   1415