1b8e80941Smrg/*
2b8e80941Smrg * Copyright © 2015 Intel Corporation
3b8e80941Smrg *
4b8e80941Smrg * Permission is hereby granted, free of charge, to any person obtaining a
5b8e80941Smrg * copy of this software and associated documentation files (the "Software"),
6b8e80941Smrg * to deal in the Software without restriction, including without limitation
7b8e80941Smrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8b8e80941Smrg * and/or sell copies of the Software, and to permit persons to whom the
9b8e80941Smrg * Software is furnished to do so, subject to the following conditions:
10b8e80941Smrg *
11b8e80941Smrg * The above copyright notice and this permission notice (including the next
12b8e80941Smrg * paragraph) shall be included in all copies or substantial portions of the
13b8e80941Smrg * Software.
14b8e80941Smrg *
15b8e80941Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16b8e80941Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17b8e80941Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18b8e80941Smrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19b8e80941Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20b8e80941Smrg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21b8e80941Smrg * IN THE SOFTWARE.
22b8e80941Smrg */
23b8e80941Smrg
24b8e80941Smrg#include <sys/ioctl.h>
25b8e80941Smrg#include <sys/types.h>
26b8e80941Smrg#include <sys/mman.h>
27b8e80941Smrg#include <string.h>
28b8e80941Smrg#include <errno.h>
29b8e80941Smrg#include <unistd.h>
30b8e80941Smrg#include <fcntl.h>
31b8e80941Smrg
32b8e80941Smrg#include "anv_private.h"
33b8e80941Smrg#include "common/gen_defines.h"
34b8e80941Smrg
/* Issue an ioctl on @fd, transparently retrying whenever the call is
 * interrupted by a signal (EINTR) or the kernel asks us to retry (EAGAIN).
 * Returns the final ioctl return value (-1 with errno set on real failure).
 */
static int
anv_ioctl(int fd, unsigned long request, void *arg)
{
   int ret = ioctl(fd, request, arg);
   while (ret == -1 && (errno == EINTR || errno == EAGAIN))
      ret = ioctl(fd, request, arg);

   return ret;
}
46b8e80941Smrg
47b8e80941Smrg/**
48b8e80941Smrg * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
49b8e80941Smrg *
50b8e80941Smrg * Return gem handle, or 0 on failure. Gem handles are never 0.
51b8e80941Smrg */
52b8e80941Smrguint32_t
53b8e80941Smrganv_gem_create(struct anv_device *device, uint64_t size)
54b8e80941Smrg{
55b8e80941Smrg   struct drm_i915_gem_create gem_create = {
56b8e80941Smrg      .size = size,
57b8e80941Smrg   };
58b8e80941Smrg
59b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
60b8e80941Smrg   if (ret != 0) {
61b8e80941Smrg      /* FIXME: What do we do if this fails? */
62b8e80941Smrg      return 0;
63b8e80941Smrg   }
64b8e80941Smrg
65b8e80941Smrg   return gem_create.handle;
66b8e80941Smrg}
67b8e80941Smrg
68b8e80941Smrgvoid
69b8e80941Smrganv_gem_close(struct anv_device *device, uint32_t gem_handle)
70b8e80941Smrg{
71b8e80941Smrg   struct drm_gem_close close = {
72b8e80941Smrg      .handle = gem_handle,
73b8e80941Smrg   };
74b8e80941Smrg
75b8e80941Smrg   anv_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
76b8e80941Smrg}
77b8e80941Smrg
78b8e80941Smrg/**
79b8e80941Smrg * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
80b8e80941Smrg */
81b8e80941Smrgvoid*
82b8e80941Smrganv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
83b8e80941Smrg             uint64_t offset, uint64_t size, uint32_t flags)
84b8e80941Smrg{
85b8e80941Smrg   struct drm_i915_gem_mmap gem_mmap = {
86b8e80941Smrg      .handle = gem_handle,
87b8e80941Smrg      .offset = offset,
88b8e80941Smrg      .size = size,
89b8e80941Smrg      .flags = flags,
90b8e80941Smrg   };
91b8e80941Smrg
92b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
93b8e80941Smrg   if (ret != 0)
94b8e80941Smrg      return MAP_FAILED;
95b8e80941Smrg
96b8e80941Smrg   VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1));
97b8e80941Smrg   return (void *)(uintptr_t) gem_mmap.addr_ptr;
98b8e80941Smrg}
99b8e80941Smrg
/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid.  Pair this with anv_gem_mmap().
 *
 * Note: the valgrind notification must happen before the unmap so the tool
 * still considers the range valid when it records the "free".
 */
void
anv_gem_munmap(void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}
109b8e80941Smrg
110b8e80941Smrguint32_t
111b8e80941Smrganv_gem_userptr(struct anv_device *device, void *mem, size_t size)
112b8e80941Smrg{
113b8e80941Smrg   struct drm_i915_gem_userptr userptr = {
114b8e80941Smrg      .user_ptr = (__u64)((unsigned long) mem),
115b8e80941Smrg      .user_size = size,
116b8e80941Smrg      .flags = 0,
117b8e80941Smrg   };
118b8e80941Smrg
119b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
120b8e80941Smrg   if (ret == -1)
121b8e80941Smrg      return 0;
122b8e80941Smrg
123b8e80941Smrg   return userptr.handle;
124b8e80941Smrg}
125b8e80941Smrg
126b8e80941Smrgint
127b8e80941Smrganv_gem_set_caching(struct anv_device *device,
128b8e80941Smrg                    uint32_t gem_handle, uint32_t caching)
129b8e80941Smrg{
130b8e80941Smrg   struct drm_i915_gem_caching gem_caching = {
131b8e80941Smrg      .handle = gem_handle,
132b8e80941Smrg      .caching = caching,
133b8e80941Smrg   };
134b8e80941Smrg
135b8e80941Smrg   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
136b8e80941Smrg}
137b8e80941Smrg
138b8e80941Smrgint
139b8e80941Smrganv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
140b8e80941Smrg                   uint32_t read_domains, uint32_t write_domain)
141b8e80941Smrg{
142b8e80941Smrg   struct drm_i915_gem_set_domain gem_set_domain = {
143b8e80941Smrg      .handle = gem_handle,
144b8e80941Smrg      .read_domains = read_domains,
145b8e80941Smrg      .write_domain = write_domain,
146b8e80941Smrg   };
147b8e80941Smrg
148b8e80941Smrg   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
149b8e80941Smrg}
150b8e80941Smrg
151b8e80941Smrg/**
152b8e80941Smrg * Returns 0, 1, or negative to indicate error
153b8e80941Smrg */
154b8e80941Smrgint
155b8e80941Smrganv_gem_busy(struct anv_device *device, uint32_t gem_handle)
156b8e80941Smrg{
157b8e80941Smrg   struct drm_i915_gem_busy busy = {
158b8e80941Smrg      .handle = gem_handle,
159b8e80941Smrg   };
160b8e80941Smrg
161b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
162b8e80941Smrg   if (ret < 0)
163b8e80941Smrg      return ret;
164b8e80941Smrg
165b8e80941Smrg   return busy.busy != 0;
166b8e80941Smrg}
167b8e80941Smrg
168b8e80941Smrg/**
169b8e80941Smrg * On error, \a timeout_ns holds the remaining time.
170b8e80941Smrg */
171b8e80941Smrgint
172b8e80941Smrganv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
173b8e80941Smrg{
174b8e80941Smrg   struct drm_i915_gem_wait wait = {
175b8e80941Smrg      .bo_handle = gem_handle,
176b8e80941Smrg      .timeout_ns = *timeout_ns,
177b8e80941Smrg      .flags = 0,
178b8e80941Smrg   };
179b8e80941Smrg
180b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
181b8e80941Smrg   *timeout_ns = wait.timeout_ns;
182b8e80941Smrg
183b8e80941Smrg   return ret;
184b8e80941Smrg}
185b8e80941Smrg
186b8e80941Smrgint
187b8e80941Smrganv_gem_execbuffer(struct anv_device *device,
188b8e80941Smrg                   struct drm_i915_gem_execbuffer2 *execbuf)
189b8e80941Smrg{
190b8e80941Smrg   if (execbuf->flags & I915_EXEC_FENCE_OUT)
191b8e80941Smrg      return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
192b8e80941Smrg   else
193b8e80941Smrg      return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
194b8e80941Smrg}
195b8e80941Smrg
196b8e80941Smrg/** Return -1 on error. */
197b8e80941Smrgint
198b8e80941Smrganv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
199b8e80941Smrg{
200b8e80941Smrg   struct drm_i915_gem_get_tiling get_tiling = {
201b8e80941Smrg      .handle = gem_handle,
202b8e80941Smrg   };
203b8e80941Smrg
204b8e80941Smrg   if (anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
205b8e80941Smrg      assert(!"Failed to get BO tiling");
206b8e80941Smrg      return -1;
207b8e80941Smrg   }
208b8e80941Smrg
209b8e80941Smrg   return get_tiling.tiling_mode;
210b8e80941Smrg}
211b8e80941Smrg
212b8e80941Smrgint
213b8e80941Smrganv_gem_set_tiling(struct anv_device *device,
214b8e80941Smrg                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
215b8e80941Smrg{
216b8e80941Smrg   int ret;
217b8e80941Smrg
218b8e80941Smrg   /* set_tiling overwrites the input on the error path, so we have to open
219b8e80941Smrg    * code anv_ioctl.
220b8e80941Smrg    */
221b8e80941Smrg   do {
222b8e80941Smrg      struct drm_i915_gem_set_tiling set_tiling = {
223b8e80941Smrg         .handle = gem_handle,
224b8e80941Smrg         .tiling_mode = tiling,
225b8e80941Smrg         .stride = stride,
226b8e80941Smrg      };
227b8e80941Smrg
228b8e80941Smrg      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
229b8e80941Smrg   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
230b8e80941Smrg
231b8e80941Smrg   return ret;
232b8e80941Smrg}
233b8e80941Smrg
234b8e80941Smrgint
235b8e80941Smrganv_gem_get_param(int fd, uint32_t param)
236b8e80941Smrg{
237b8e80941Smrg   int tmp;
238b8e80941Smrg
239b8e80941Smrg   drm_i915_getparam_t gp = {
240b8e80941Smrg      .param = param,
241b8e80941Smrg      .value = &tmp,
242b8e80941Smrg   };
243b8e80941Smrg
244b8e80941Smrg   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
245b8e80941Smrg   if (ret == 0)
246b8e80941Smrg      return tmp;
247b8e80941Smrg
248b8e80941Smrg   return 0;
249b8e80941Smrg}
250b8e80941Smrg
/* Detect whether the kernel applies bit-6 address swizzling for the given
 * tiling mode: create a throwaway 4096-byte BO, set the requested tiling on
 * it, read the resulting swizzle mode back, then destroy the BO.  Returns
 * false on any ioctl failure as well as when no swizzling is in effect.
 */
bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code anv_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         /* Stride values only need to be legal for the tiling mode; X-tiling
          * requires a larger minimum stride than Y-tiling.
          */
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

close_and_return:

   /* Always release the probe BO, regardless of which path got us here. */
   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   anv_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}
305b8e80941Smrg
/* Probe whether the kernel supports per-context priority by trying to set
 * the default (medium) priority on the default context (ctx_id 0); a zero
 * return from the setparam means the parameter is understood.
 */
bool
anv_gem_has_context_priority(int fd)
{
   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
                                     GEN_CONTEXT_MEDIUM_PRIORITY);
}
312b8e80941Smrg
313b8e80941Smrgint
314b8e80941Smrganv_gem_create_context(struct anv_device *device)
315b8e80941Smrg{
316b8e80941Smrg   struct drm_i915_gem_context_create create = { 0 };
317b8e80941Smrg
318b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
319b8e80941Smrg   if (ret == -1)
320b8e80941Smrg      return -1;
321b8e80941Smrg
322b8e80941Smrg   return create.ctx_id;
323b8e80941Smrg}
324b8e80941Smrg
325b8e80941Smrgint
326b8e80941Smrganv_gem_destroy_context(struct anv_device *device, int context)
327b8e80941Smrg{
328b8e80941Smrg   struct drm_i915_gem_context_destroy destroy = {
329b8e80941Smrg      .ctx_id = context,
330b8e80941Smrg   };
331b8e80941Smrg
332b8e80941Smrg   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
333b8e80941Smrg}
334b8e80941Smrg
335b8e80941Smrgint
336b8e80941Smrganv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
337b8e80941Smrg{
338b8e80941Smrg   struct drm_i915_gem_context_param p = {
339b8e80941Smrg      .ctx_id = context,
340b8e80941Smrg      .param = param,
341b8e80941Smrg      .value = value,
342b8e80941Smrg   };
343b8e80941Smrg   int err = 0;
344b8e80941Smrg
345b8e80941Smrg   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
346b8e80941Smrg      err = -errno;
347b8e80941Smrg   return err;
348b8e80941Smrg}
349b8e80941Smrg
350b8e80941Smrgint
351b8e80941Smrganv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
352b8e80941Smrg{
353b8e80941Smrg   struct drm_i915_gem_context_param gp = {
354b8e80941Smrg      .ctx_id = context,
355b8e80941Smrg      .param = param,
356b8e80941Smrg   };
357b8e80941Smrg
358b8e80941Smrg   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
359b8e80941Smrg   if (ret == -1)
360b8e80941Smrg      return -1;
361b8e80941Smrg
362b8e80941Smrg   *value = gp.value;
363b8e80941Smrg   return 0;
364b8e80941Smrg}
365b8e80941Smrg
366b8e80941Smrgint
367b8e80941Smrganv_gem_get_aperture(int fd, uint64_t *size)
368b8e80941Smrg{
369b8e80941Smrg   struct drm_i915_gem_get_aperture aperture = { 0 };
370b8e80941Smrg
371b8e80941Smrg   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
372b8e80941Smrg   if (ret == -1)
373b8e80941Smrg      return -1;
374b8e80941Smrg
375b8e80941Smrg   *size = aperture.aper_available_size;
376b8e80941Smrg
377b8e80941Smrg   return 0;
378b8e80941Smrg}
379b8e80941Smrg
380b8e80941Smrgint
381b8e80941Smrganv_gem_gpu_get_reset_stats(struct anv_device *device,
382b8e80941Smrg                            uint32_t *active, uint32_t *pending)
383b8e80941Smrg{
384b8e80941Smrg   struct drm_i915_reset_stats stats = {
385b8e80941Smrg      .ctx_id = device->context_id,
386b8e80941Smrg   };
387b8e80941Smrg
388b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
389b8e80941Smrg   if (ret == 0) {
390b8e80941Smrg      *active = stats.batch_active;
391b8e80941Smrg      *pending = stats.batch_pending;
392b8e80941Smrg   }
393b8e80941Smrg
394b8e80941Smrg   return ret;
395b8e80941Smrg}
396b8e80941Smrg
397b8e80941Smrgint
398b8e80941Smrganv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
399b8e80941Smrg{
400b8e80941Smrg   struct drm_prime_handle args = {
401b8e80941Smrg      .handle = gem_handle,
402b8e80941Smrg      .flags = DRM_CLOEXEC,
403b8e80941Smrg   };
404b8e80941Smrg
405b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
406b8e80941Smrg   if (ret == -1)
407b8e80941Smrg      return -1;
408b8e80941Smrg
409b8e80941Smrg   return args.fd;
410b8e80941Smrg}
411b8e80941Smrg
412b8e80941Smrguint32_t
413b8e80941Smrganv_gem_fd_to_handle(struct anv_device *device, int fd)
414b8e80941Smrg{
415b8e80941Smrg   struct drm_prime_handle args = {
416b8e80941Smrg      .fd = fd,
417b8e80941Smrg   };
418b8e80941Smrg
419b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
420b8e80941Smrg   if (ret == -1)
421b8e80941Smrg      return 0;
422b8e80941Smrg
423b8e80941Smrg   return args.handle;
424b8e80941Smrg}
425b8e80941Smrg
426b8e80941Smrgint
427b8e80941Smrganv_gem_reg_read(struct anv_device *device, uint32_t offset, uint64_t *result)
428b8e80941Smrg{
429b8e80941Smrg   struct drm_i915_reg_read args = {
430b8e80941Smrg      .offset = offset
431b8e80941Smrg   };
432b8e80941Smrg
433b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_REG_READ, &args);
434b8e80941Smrg
435b8e80941Smrg   *result = args.val;
436b8e80941Smrg   return ret;
437b8e80941Smrg}
438b8e80941Smrg
#ifndef SYNC_IOC_MAGIC
/* duplicated from linux/sync_file.h to avoid build-time dependency
 * on new (v4.7) kernel headers.  Once distro's are mostly using
 * something newer than v4.7 drop this and #include <linux/sync_file.h>
 * instead.
 *
 * Layout must stay byte-for-byte identical to the kernel's struct.
 */
struct sync_merge_data {
   char  name[32];   /* name for the merged fence */
   __s32 fd2;        /* second sync_file fd to merge with */
   __s32 fence;      /* out: fd of the merged fence */
   __u32 flags;
   __u32 pad;
};

#define SYNC_IOC_MAGIC '>'
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#endif
456b8e80941Smrg
457b8e80941Smrgint
458b8e80941Smrganv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
459b8e80941Smrg{
460b8e80941Smrg   struct sync_merge_data args = {
461b8e80941Smrg      .name = "anv merge fence",
462b8e80941Smrg      .fd2 = fd2,
463b8e80941Smrg      .fence = -1,
464b8e80941Smrg   };
465b8e80941Smrg
466b8e80941Smrg   int ret = anv_ioctl(fd1, SYNC_IOC_MERGE, &args);
467b8e80941Smrg   if (ret == -1)
468b8e80941Smrg      return -1;
469b8e80941Smrg
470b8e80941Smrg   return args.fence;
471b8e80941Smrg}
472b8e80941Smrg
473b8e80941Smrguint32_t
474b8e80941Smrganv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
475b8e80941Smrg{
476b8e80941Smrg   struct drm_syncobj_create args = {
477b8e80941Smrg      .flags = flags,
478b8e80941Smrg   };
479b8e80941Smrg
480b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
481b8e80941Smrg   if (ret)
482b8e80941Smrg      return 0;
483b8e80941Smrg
484b8e80941Smrg   return args.handle;
485b8e80941Smrg}
486b8e80941Smrg
487b8e80941Smrgvoid
488b8e80941Smrganv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
489b8e80941Smrg{
490b8e80941Smrg   struct drm_syncobj_destroy args = {
491b8e80941Smrg      .handle = handle,
492b8e80941Smrg   };
493b8e80941Smrg
494b8e80941Smrg   anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
495b8e80941Smrg}
496b8e80941Smrg
497b8e80941Smrgint
498b8e80941Smrganv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
499b8e80941Smrg{
500b8e80941Smrg   struct drm_syncobj_handle args = {
501b8e80941Smrg      .handle = handle,
502b8e80941Smrg   };
503b8e80941Smrg
504b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
505b8e80941Smrg   if (ret)
506b8e80941Smrg      return -1;
507b8e80941Smrg
508b8e80941Smrg   return args.fd;
509b8e80941Smrg}
510b8e80941Smrg
511b8e80941Smrguint32_t
512b8e80941Smrganv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
513b8e80941Smrg{
514b8e80941Smrg   struct drm_syncobj_handle args = {
515b8e80941Smrg      .fd = fd,
516b8e80941Smrg   };
517b8e80941Smrg
518b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
519b8e80941Smrg   if (ret)
520b8e80941Smrg      return 0;
521b8e80941Smrg
522b8e80941Smrg   return args.handle;
523b8e80941Smrg}
524b8e80941Smrg
525b8e80941Smrgint
526b8e80941Smrganv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
527b8e80941Smrg{
528b8e80941Smrg   struct drm_syncobj_handle args = {
529b8e80941Smrg      .handle = handle,
530b8e80941Smrg      .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
531b8e80941Smrg   };
532b8e80941Smrg
533b8e80941Smrg   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
534b8e80941Smrg   if (ret)
535b8e80941Smrg      return -1;
536b8e80941Smrg
537b8e80941Smrg   return args.fd;
538b8e80941Smrg}
539b8e80941Smrg
540b8e80941Smrgint
541b8e80941Smrganv_gem_syncobj_import_sync_file(struct anv_device *device,
542b8e80941Smrg                                 uint32_t handle, int fd)
543b8e80941Smrg{
544b8e80941Smrg   struct drm_syncobj_handle args = {
545b8e80941Smrg      .handle = handle,
546b8e80941Smrg      .fd = fd,
547b8e80941Smrg      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
548b8e80941Smrg   };
549b8e80941Smrg
550b8e80941Smrg   return anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
551b8e80941Smrg}
552b8e80941Smrg
553b8e80941Smrgvoid
554b8e80941Smrganv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
555b8e80941Smrg{
556b8e80941Smrg   struct drm_syncobj_array args = {
557b8e80941Smrg      .handles = (uint64_t)(uintptr_t)&handle,
558b8e80941Smrg      .count_handles = 1,
559b8e80941Smrg   };
560b8e80941Smrg
561b8e80941Smrg   anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
562b8e80941Smrg}
563b8e80941Smrg
564b8e80941Smrgbool
565b8e80941Smrganv_gem_supports_syncobj_wait(int fd)
566b8e80941Smrg{
567b8e80941Smrg   int ret;
568b8e80941Smrg
569b8e80941Smrg   struct drm_syncobj_create create = {
570b8e80941Smrg      .flags = 0,
571b8e80941Smrg   };
572b8e80941Smrg   ret = anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
573b8e80941Smrg   if (ret)
574b8e80941Smrg      return false;
575b8e80941Smrg
576b8e80941Smrg   uint32_t syncobj = create.handle;
577b8e80941Smrg
578b8e80941Smrg   struct drm_syncobj_wait wait = {
579b8e80941Smrg      .handles = (uint64_t)(uintptr_t)&create,
580b8e80941Smrg      .count_handles = 1,
581b8e80941Smrg      .timeout_nsec = 0,
582b8e80941Smrg      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
583b8e80941Smrg   };
584b8e80941Smrg   ret = anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
585b8e80941Smrg
586b8e80941Smrg   struct drm_syncobj_destroy destroy = {
587b8e80941Smrg      .handle = syncobj,
588b8e80941Smrg   };
589b8e80941Smrg   anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
590b8e80941Smrg
591b8e80941Smrg   /* If it timed out, then we have the ioctl and it supports the
592b8e80941Smrg    * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT flag.
593b8e80941Smrg    */
594b8e80941Smrg   return ret == -1 && errno == ETIME;
595b8e80941Smrg}
596b8e80941Smrg
597b8e80941Smrgint
598b8e80941Smrganv_gem_syncobj_wait(struct anv_device *device,
599b8e80941Smrg                     uint32_t *handles, uint32_t num_handles,
600b8e80941Smrg                     int64_t abs_timeout_ns, bool wait_all)
601b8e80941Smrg{
602b8e80941Smrg   struct drm_syncobj_wait args = {
603b8e80941Smrg      .handles = (uint64_t)(uintptr_t)handles,
604b8e80941Smrg      .count_handles = num_handles,
605b8e80941Smrg      .timeout_nsec = abs_timeout_ns,
606b8e80941Smrg      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
607b8e80941Smrg   };
608b8e80941Smrg
609b8e80941Smrg   if (wait_all)
610b8e80941Smrg      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
611b8e80941Smrg
612b8e80941Smrg   return anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
613b8e80941Smrg}
614