1/*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include <sys/ioctl.h>
25#include <sys/types.h>
26#include <sys/mman.h>
27#include <string.h>
28#include <errno.h>
29#include <unistd.h>
30#include <fcntl.h>
31
32#include "anv_private.h"
33#include "common/gen_defines.h"
34
/* Issue an ioctl on @fd, transparently restarting it whenever the call is
 * interrupted (EINTR) or the kernel asks us to retry (EAGAIN).
 */
static int
anv_ioctl(int fd, unsigned long request, void *arg)
{
   for (;;) {
      int ret = ioctl(fd, request, arg);
      if (ret != -1)
         return ret;
      if (errno != EINTR && errno != EAGAIN)
         return ret;
      /* Interrupted or transient failure: retry. */
   }
}
46
47/**
48 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
49 *
50 * Return gem handle, or 0 on failure. Gem handles are never 0.
51 */
52uint32_t
53anv_gem_create(struct anv_device *device, uint64_t size)
54{
55   struct drm_i915_gem_create gem_create = {
56      .size = size,
57   };
58
59   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
60   if (ret != 0) {
61      /* FIXME: What do we do if this fails? */
62      return 0;
63   }
64
65   return gem_create.handle;
66}
67
68void
69anv_gem_close(struct anv_device *device, uint32_t gem_handle)
70{
71   struct drm_gem_close close = {
72      .handle = gem_handle,
73   };
74
75   anv_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
76}
77
78/**
79 * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
80 */
81void*
82anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
83             uint64_t offset, uint64_t size, uint32_t flags)
84{
85   struct drm_i915_gem_mmap gem_mmap = {
86      .handle = gem_handle,
87      .offset = offset,
88      .size = size,
89      .flags = flags,
90   };
91
92   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
93   if (ret != 0)
94      return MAP_FAILED;
95
96   VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1));
97   return (void *)(uintptr_t) gem_mmap.addr_ptr;
98}
99
/* Thin wrapper around munmap() that also tells valgrind the block from
 * anv_gem_mmap() is gone.  Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(void *p, uint64_t size)
{
   /* Notify valgrind first, while the mapping is still valid. */
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}
109
110uint32_t
111anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
112{
113   struct drm_i915_gem_userptr userptr = {
114      .user_ptr = (__u64)((unsigned long) mem),
115      .user_size = size,
116      .flags = 0,
117   };
118
119   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
120   if (ret == -1)
121      return 0;
122
123   return userptr.handle;
124}
125
126int
127anv_gem_set_caching(struct anv_device *device,
128                    uint32_t gem_handle, uint32_t caching)
129{
130   struct drm_i915_gem_caching gem_caching = {
131      .handle = gem_handle,
132      .caching = caching,
133   };
134
135   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
136}
137
138int
139anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
140                   uint32_t read_domains, uint32_t write_domain)
141{
142   struct drm_i915_gem_set_domain gem_set_domain = {
143      .handle = gem_handle,
144      .read_domains = read_domains,
145      .write_domain = write_domain,
146   };
147
148   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
149}
150
151/**
152 * Returns 0, 1, or negative to indicate error
153 */
154int
155anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
156{
157   struct drm_i915_gem_busy busy = {
158      .handle = gem_handle,
159   };
160
161   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
162   if (ret < 0)
163      return ret;
164
165   return busy.busy != 0;
166}
167
168/**
169 * On error, \a timeout_ns holds the remaining time.
170 */
171int
172anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
173{
174   struct drm_i915_gem_wait wait = {
175      .bo_handle = gem_handle,
176      .timeout_ns = *timeout_ns,
177      .flags = 0,
178   };
179
180   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
181   *timeout_ns = wait.timeout_ns;
182
183   return ret;
184}
185
186int
187anv_gem_execbuffer(struct anv_device *device,
188                   struct drm_i915_gem_execbuffer2 *execbuf)
189{
190   if (execbuf->flags & I915_EXEC_FENCE_OUT)
191      return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
192   else
193      return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
194}
195
196/** Return -1 on error. */
197int
198anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
199{
200   struct drm_i915_gem_get_tiling get_tiling = {
201      .handle = gem_handle,
202   };
203
204   if (anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
205      assert(!"Failed to get BO tiling");
206      return -1;
207   }
208
209   return get_tiling.tiling_mode;
210}
211
212int
213anv_gem_set_tiling(struct anv_device *device,
214                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
215{
216   int ret;
217
218   /* set_tiling overwrites the input on the error path, so we have to open
219    * code anv_ioctl.
220    */
221   do {
222      struct drm_i915_gem_set_tiling set_tiling = {
223         .handle = gem_handle,
224         .tiling_mode = tiling,
225         .stride = stride,
226      };
227
228      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
229   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
230
231   return ret;
232}
233
234int
235anv_gem_get_param(int fd, uint32_t param)
236{
237   int tmp;
238
239   drm_i915_getparam_t gp = {
240      .param = param,
241      .value = &tmp,
242   };
243
244   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
245   if (ret == 0)
246      return tmp;
247
248   return 0;
249}
250
/* Detect whether the kernel reports bit-6 swizzling for the given tiling
 * mode.  Works by creating a one-page scratch BO, applying @tiling to it,
 * and reading back the swizzle mode.  Returns false on any failure (and
 * asserts in debug builds).  The scratch BO is always closed before return.
 */
bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   /* One page is enough; we only care about the reported swizzle mode. */
   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code anv_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         /* Stride values chosen per tiling mode; X-tiling rows are wider. */
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

close_and_return:

   /* Always release the scratch BO, on both success and failure paths. */
   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   anv_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}
305
306bool
307anv_gem_has_context_priority(int fd)
308{
309   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
310                                     GEN_CONTEXT_MEDIUM_PRIORITY);
311}
312
313int
314anv_gem_create_context(struct anv_device *device)
315{
316   struct drm_i915_gem_context_create create = { 0 };
317
318   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
319   if (ret == -1)
320      return -1;
321
322   return create.ctx_id;
323}
324
325int
326anv_gem_destroy_context(struct anv_device *device, int context)
327{
328   struct drm_i915_gem_context_destroy destroy = {
329      .ctx_id = context,
330   };
331
332   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
333}
334
335int
336anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
337{
338   struct drm_i915_gem_context_param p = {
339      .ctx_id = context,
340      .param = param,
341      .value = value,
342   };
343   int err = 0;
344
345   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
346      err = -errno;
347   return err;
348}
349
350int
351anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
352{
353   struct drm_i915_gem_context_param gp = {
354      .ctx_id = context,
355      .param = param,
356   };
357
358   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
359   if (ret == -1)
360      return -1;
361
362   *value = gp.value;
363   return 0;
364}
365
366int
367anv_gem_get_aperture(int fd, uint64_t *size)
368{
369   struct drm_i915_gem_get_aperture aperture = { 0 };
370
371   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
372   if (ret == -1)
373      return -1;
374
375   *size = aperture.aper_available_size;
376
377   return 0;
378}
379
380int
381anv_gem_gpu_get_reset_stats(struct anv_device *device,
382                            uint32_t *active, uint32_t *pending)
383{
384   struct drm_i915_reset_stats stats = {
385      .ctx_id = device->context_id,
386   };
387
388   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
389   if (ret == 0) {
390      *active = stats.batch_active;
391      *pending = stats.batch_pending;
392   }
393
394   return ret;
395}
396
397int
398anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
399{
400   struct drm_prime_handle args = {
401      .handle = gem_handle,
402      .flags = DRM_CLOEXEC,
403   };
404
405   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
406   if (ret == -1)
407      return -1;
408
409   return args.fd;
410}
411
412uint32_t
413anv_gem_fd_to_handle(struct anv_device *device, int fd)
414{
415   struct drm_prime_handle args = {
416      .fd = fd,
417   };
418
419   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
420   if (ret == -1)
421      return 0;
422
423   return args.handle;
424}
425
426int
427anv_gem_reg_read(struct anv_device *device, uint32_t offset, uint64_t *result)
428{
429   struct drm_i915_reg_read args = {
430      .offset = offset
431   };
432
433   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_REG_READ, &args);
434
435   *result = args.val;
436   return ret;
437}
438
#ifndef SYNC_IOC_MAGIC
/* duplicated from linux/sync_file.h to avoid build-time dependency
 * on new (v4.7) kernel headers.  Once distro's are mostly using
 * something newer than v4.7 drop this and #include <linux/sync_file.h>
 * instead.
 */
struct sync_merge_data {
   char  name[32];  /* in: name of the new fence */
   __s32 fd2;       /* in: second sync_file fd to merge with */
   __s32 fence;     /* out: fd of the newly created merged fence */
   __u32 flags;
   __u32 pad;
};

#define SYNC_IOC_MAGIC '>'
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#endif
456
457int
458anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
459{
460   struct sync_merge_data args = {
461      .name = "anv merge fence",
462      .fd2 = fd2,
463      .fence = -1,
464   };
465
466   int ret = anv_ioctl(fd1, SYNC_IOC_MERGE, &args);
467   if (ret == -1)
468      return -1;
469
470   return args.fence;
471}
472
473uint32_t
474anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
475{
476   struct drm_syncobj_create args = {
477      .flags = flags,
478   };
479
480   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
481   if (ret)
482      return 0;
483
484   return args.handle;
485}
486
487void
488anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
489{
490   struct drm_syncobj_destroy args = {
491      .handle = handle,
492   };
493
494   anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
495}
496
497int
498anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
499{
500   struct drm_syncobj_handle args = {
501      .handle = handle,
502   };
503
504   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
505   if (ret)
506      return -1;
507
508   return args.fd;
509}
510
511uint32_t
512anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
513{
514   struct drm_syncobj_handle args = {
515      .fd = fd,
516   };
517
518   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
519   if (ret)
520      return 0;
521
522   return args.handle;
523}
524
525int
526anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
527{
528   struct drm_syncobj_handle args = {
529      .handle = handle,
530      .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
531   };
532
533   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
534   if (ret)
535      return -1;
536
537   return args.fd;
538}
539
540int
541anv_gem_syncobj_import_sync_file(struct anv_device *device,
542                                 uint32_t handle, int fd)
543{
544   struct drm_syncobj_handle args = {
545      .handle = handle,
546      .fd = fd,
547      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
548   };
549
550   return anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
551}
552
553void
554anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
555{
556   struct drm_syncobj_array args = {
557      .handles = (uint64_t)(uintptr_t)&handle,
558      .count_handles = 1,
559   };
560
561   anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
562}
563
564bool
565anv_gem_supports_syncobj_wait(int fd)
566{
567   int ret;
568
569   struct drm_syncobj_create create = {
570      .flags = 0,
571   };
572   ret = anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
573   if (ret)
574      return false;
575
576   uint32_t syncobj = create.handle;
577
578   struct drm_syncobj_wait wait = {
579      .handles = (uint64_t)(uintptr_t)&create,
580      .count_handles = 1,
581      .timeout_nsec = 0,
582      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
583   };
584   ret = anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
585
586   struct drm_syncobj_destroy destroy = {
587      .handle = syncobj,
588   };
589   anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
590
591   /* If it timed out, then we have the ioctl and it supports the
592    * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT flag.
593    */
594   return ret == -1 && errno == ETIME;
595}
596
597int
598anv_gem_syncobj_wait(struct anv_device *device,
599                     uint32_t *handles, uint32_t num_handles,
600                     int64_t abs_timeout_ns, bool wait_all)
601{
602   struct drm_syncobj_wait args = {
603      .handles = (uint64_t)(uintptr_t)handles,
604      .count_handles = num_handles,
605      .timeout_nsec = abs_timeout_ns,
606      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
607   };
608
609   if (wait_all)
610      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
611
612   return anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
613}
614