101e04c3fSmrg/*
201e04c3fSmrg * Copyright © 2015 Intel Corporation
301e04c3fSmrg *
401e04c3fSmrg * Permission is hereby granted, free of charge, to any person obtaining a
501e04c3fSmrg * copy of this software and associated documentation files (the "Software"),
601e04c3fSmrg * to deal in the Software without restriction, including without limitation
701e04c3fSmrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
801e04c3fSmrg * and/or sell copies of the Software, and to permit persons to whom the
901e04c3fSmrg * Software is furnished to do so, subject to the following conditions:
1001e04c3fSmrg *
1101e04c3fSmrg * The above copyright notice and this permission notice (including the next
1201e04c3fSmrg * paragraph) shall be included in all copies or substantial portions of the
1301e04c3fSmrg * Software.
1401e04c3fSmrg *
1501e04c3fSmrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1601e04c3fSmrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1701e04c3fSmrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
1801e04c3fSmrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1901e04c3fSmrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2001e04c3fSmrg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
2101e04c3fSmrg * IN THE SOFTWARE.
2201e04c3fSmrg */
2301e04c3fSmrg
2401e04c3fSmrg#include <sys/ioctl.h>
2501e04c3fSmrg#include <sys/types.h>
2601e04c3fSmrg#include <sys/mman.h>
2701e04c3fSmrg#include <string.h>
2801e04c3fSmrg#include <errno.h>
2901e04c3fSmrg#include <unistd.h>
3001e04c3fSmrg#include <fcntl.h>
3101e04c3fSmrg
3201e04c3fSmrg#include "anv_private.h"
337ec681f3Smrg#include "common/intel_defines.h"
347ec681f3Smrg#include "common/intel_gem.h"
357ec681f3Smrg#include "drm-uapi/sync_file.h"
3601e04c3fSmrg
3701e04c3fSmrg/**
3801e04c3fSmrg * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
3901e04c3fSmrg *
4001e04c3fSmrg * Return gem handle, or 0 on failure. Gem handles are never 0.
4101e04c3fSmrg */
4201e04c3fSmrguint32_t
4301e04c3fSmrganv_gem_create(struct anv_device *device, uint64_t size)
4401e04c3fSmrg{
4501e04c3fSmrg   struct drm_i915_gem_create gem_create = {
4601e04c3fSmrg      .size = size,
4701e04c3fSmrg   };
4801e04c3fSmrg
497ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
5001e04c3fSmrg   if (ret != 0) {
5101e04c3fSmrg      /* FIXME: What do we do if this fails? */
5201e04c3fSmrg      return 0;
5301e04c3fSmrg   }
5401e04c3fSmrg
5501e04c3fSmrg   return gem_create.handle;
5601e04c3fSmrg}
5701e04c3fSmrg
5801e04c3fSmrgvoid
5901e04c3fSmrganv_gem_close(struct anv_device *device, uint32_t gem_handle)
6001e04c3fSmrg{
6101e04c3fSmrg   struct drm_gem_close close = {
6201e04c3fSmrg      .handle = gem_handle,
6301e04c3fSmrg   };
6401e04c3fSmrg
657ec681f3Smrg   intel_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
667ec681f3Smrg}
677ec681f3Smrg
687ec681f3Smrguint32_t
697ec681f3Smrganv_gem_create_regions(struct anv_device *device, uint64_t anv_bo_size,
707ec681f3Smrg                       uint32_t num_regions,
717ec681f3Smrg                       struct drm_i915_gem_memory_class_instance *regions)
727ec681f3Smrg{
737ec681f3Smrg   struct drm_i915_gem_create_ext_memory_regions ext_regions = {
747ec681f3Smrg      .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
757ec681f3Smrg      .num_regions = num_regions,
767ec681f3Smrg      .regions = (uintptr_t)regions,
777ec681f3Smrg   };
787ec681f3Smrg
797ec681f3Smrg   struct drm_i915_gem_create_ext gem_create = {
807ec681f3Smrg      .size = anv_bo_size,
817ec681f3Smrg      .extensions = (uintptr_t) &ext_regions,
827ec681f3Smrg   };
837ec681f3Smrg
847ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE_EXT,
857ec681f3Smrg                         &gem_create);
867ec681f3Smrg   if (ret != 0) {
877ec681f3Smrg      return 0;
887ec681f3Smrg   }
897ec681f3Smrg
907ec681f3Smrg   return gem_create.handle;
9101e04c3fSmrg}
9201e04c3fSmrg
9301e04c3fSmrg/**
9401e04c3fSmrg * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
9501e04c3fSmrg */
967ec681f3Smrgstatic void*
977ec681f3Smrganv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
987ec681f3Smrg                    uint64_t offset, uint64_t size, uint32_t flags)
9901e04c3fSmrg{
1007ec681f3Smrg   struct drm_i915_gem_mmap_offset gem_mmap = {
1017ec681f3Smrg      .handle = gem_handle,
1027ec681f3Smrg      .flags = device->info.has_local_mem ? I915_MMAP_OFFSET_FIXED :
1037ec681f3Smrg         (flags & I915_MMAP_WC) ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
1047ec681f3Smrg   };
1057ec681f3Smrg   assert(offset == 0);
1067ec681f3Smrg
1077ec681f3Smrg   /* Get the fake offset back */
1087ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
1097ec681f3Smrg   if (ret != 0)
1107ec681f3Smrg      return MAP_FAILED;
1117ec681f3Smrg
1127ec681f3Smrg   /* And map it */
1137ec681f3Smrg   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
1147ec681f3Smrg                    device->fd, gem_mmap.offset);
1157ec681f3Smrg   return map;
1167ec681f3Smrg}
1177ec681f3Smrg
1187ec681f3Smrgstatic void*
1197ec681f3Smrganv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
1207ec681f3Smrg                    uint64_t offset, uint64_t size, uint32_t flags)
1217ec681f3Smrg{
1227ec681f3Smrg   assert(!device->info.has_local_mem);
1237ec681f3Smrg
12401e04c3fSmrg   struct drm_i915_gem_mmap gem_mmap = {
12501e04c3fSmrg      .handle = gem_handle,
12601e04c3fSmrg      .offset = offset,
12701e04c3fSmrg      .size = size,
12801e04c3fSmrg      .flags = flags,
12901e04c3fSmrg   };
13001e04c3fSmrg
1317ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
13201e04c3fSmrg   if (ret != 0)
13301e04c3fSmrg      return MAP_FAILED;
13401e04c3fSmrg
13501e04c3fSmrg   return (void *)(uintptr_t) gem_mmap.addr_ptr;
13601e04c3fSmrg}
13701e04c3fSmrg
1387ec681f3Smrg/**
1397ec681f3Smrg * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
1407ec681f3Smrg */
1417ec681f3Smrgvoid*
1427ec681f3Smrganv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
1437ec681f3Smrg             uint64_t offset, uint64_t size, uint32_t flags)
1447ec681f3Smrg{
1457ec681f3Smrg   void *map;
1467ec681f3Smrg   if (device->physical->has_mmap_offset)
1477ec681f3Smrg      map = anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
1487ec681f3Smrg   else
1497ec681f3Smrg      map = anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);
1507ec681f3Smrg
1517ec681f3Smrg   if (map != MAP_FAILED)
1527ec681f3Smrg      VG(VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, 1));
1537ec681f3Smrg
1547ec681f3Smrg   return map;
1557ec681f3Smrg}
1567ec681f3Smrg
/* Wrapper around munmap() that also tells valgrind the mapping is gone.
 * Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}
16601e04c3fSmrg
16701e04c3fSmrguint32_t
16801e04c3fSmrganv_gem_userptr(struct anv_device *device, void *mem, size_t size)
16901e04c3fSmrg{
17001e04c3fSmrg   struct drm_i915_gem_userptr userptr = {
17101e04c3fSmrg      .user_ptr = (__u64)((unsigned long) mem),
17201e04c3fSmrg      .user_size = size,
17301e04c3fSmrg      .flags = 0,
17401e04c3fSmrg   };
17501e04c3fSmrg
1767ec681f3Smrg   if (device->physical->has_userptr_probe)
1777ec681f3Smrg      userptr.flags |= I915_USERPTR_PROBE;
1787ec681f3Smrg
1797ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
18001e04c3fSmrg   if (ret == -1)
18101e04c3fSmrg      return 0;
18201e04c3fSmrg
18301e04c3fSmrg   return userptr.handle;
18401e04c3fSmrg}
18501e04c3fSmrg
18601e04c3fSmrgint
18701e04c3fSmrganv_gem_set_caching(struct anv_device *device,
18801e04c3fSmrg                    uint32_t gem_handle, uint32_t caching)
18901e04c3fSmrg{
19001e04c3fSmrg   struct drm_i915_gem_caching gem_caching = {
19101e04c3fSmrg      .handle = gem_handle,
19201e04c3fSmrg      .caching = caching,
19301e04c3fSmrg   };
19401e04c3fSmrg
1957ec681f3Smrg   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
19601e04c3fSmrg}
19701e04c3fSmrg
19801e04c3fSmrgint
19901e04c3fSmrganv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
20001e04c3fSmrg                   uint32_t read_domains, uint32_t write_domain)
20101e04c3fSmrg{
20201e04c3fSmrg   struct drm_i915_gem_set_domain gem_set_domain = {
20301e04c3fSmrg      .handle = gem_handle,
20401e04c3fSmrg      .read_domains = read_domains,
20501e04c3fSmrg      .write_domain = write_domain,
20601e04c3fSmrg   };
20701e04c3fSmrg
2087ec681f3Smrg   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
20901e04c3fSmrg}
21001e04c3fSmrg
21101e04c3fSmrg/**
21201e04c3fSmrg * Returns 0, 1, or negative to indicate error
21301e04c3fSmrg */
21401e04c3fSmrgint
21501e04c3fSmrganv_gem_busy(struct anv_device *device, uint32_t gem_handle)
21601e04c3fSmrg{
21701e04c3fSmrg   struct drm_i915_gem_busy busy = {
21801e04c3fSmrg      .handle = gem_handle,
21901e04c3fSmrg   };
22001e04c3fSmrg
2217ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
22201e04c3fSmrg   if (ret < 0)
22301e04c3fSmrg      return ret;
22401e04c3fSmrg
22501e04c3fSmrg   return busy.busy != 0;
22601e04c3fSmrg}
22701e04c3fSmrg
22801e04c3fSmrg/**
22901e04c3fSmrg * On error, \a timeout_ns holds the remaining time.
23001e04c3fSmrg */
23101e04c3fSmrgint
23201e04c3fSmrganv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
23301e04c3fSmrg{
23401e04c3fSmrg   struct drm_i915_gem_wait wait = {
23501e04c3fSmrg      .bo_handle = gem_handle,
23601e04c3fSmrg      .timeout_ns = *timeout_ns,
23701e04c3fSmrg      .flags = 0,
23801e04c3fSmrg   };
23901e04c3fSmrg
2407ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
24101e04c3fSmrg   *timeout_ns = wait.timeout_ns;
24201e04c3fSmrg
24301e04c3fSmrg   return ret;
24401e04c3fSmrg}
24501e04c3fSmrg
24601e04c3fSmrgint
24701e04c3fSmrganv_gem_execbuffer(struct anv_device *device,
24801e04c3fSmrg                   struct drm_i915_gem_execbuffer2 *execbuf)
24901e04c3fSmrg{
25001e04c3fSmrg   if (execbuf->flags & I915_EXEC_FENCE_OUT)
2517ec681f3Smrg      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
25201e04c3fSmrg   else
2537ec681f3Smrg      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
25401e04c3fSmrg}
25501e04c3fSmrg
25601e04c3fSmrg/** Return -1 on error. */
25701e04c3fSmrgint
25801e04c3fSmrganv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
25901e04c3fSmrg{
26001e04c3fSmrg   struct drm_i915_gem_get_tiling get_tiling = {
26101e04c3fSmrg      .handle = gem_handle,
26201e04c3fSmrg   };
26301e04c3fSmrg
2647ec681f3Smrg   /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
2657ec681f3Smrg    * anymore, so we will need another way to get the tiling. Apparently this
2667ec681f3Smrg    * is only used in Android code, so we may need some other way to
2677ec681f3Smrg    * communicate the tiling mode.
2687ec681f3Smrg    */
2697ec681f3Smrg   if (intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
27001e04c3fSmrg      assert(!"Failed to get BO tiling");
27101e04c3fSmrg      return -1;
27201e04c3fSmrg   }
27301e04c3fSmrg
27401e04c3fSmrg   return get_tiling.tiling_mode;
27501e04c3fSmrg}
27601e04c3fSmrg
27701e04c3fSmrgint
27801e04c3fSmrganv_gem_set_tiling(struct anv_device *device,
27901e04c3fSmrg                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
28001e04c3fSmrg{
28101e04c3fSmrg   int ret;
28201e04c3fSmrg
2837ec681f3Smrg   /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
2847ec681f3Smrg    * nothing needs to be done.
2857ec681f3Smrg    */
2867ec681f3Smrg   if (!device->info.has_tiling_uapi)
2877ec681f3Smrg      return 0;
2887ec681f3Smrg
28901e04c3fSmrg   /* set_tiling overwrites the input on the error path, so we have to open
2907ec681f3Smrg    * code intel_ioctl.
29101e04c3fSmrg    */
29201e04c3fSmrg   do {
29301e04c3fSmrg      struct drm_i915_gem_set_tiling set_tiling = {
29401e04c3fSmrg         .handle = gem_handle,
29501e04c3fSmrg         .tiling_mode = tiling,
29601e04c3fSmrg         .stride = stride,
29701e04c3fSmrg      };
29801e04c3fSmrg
29901e04c3fSmrg      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
30001e04c3fSmrg   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
30101e04c3fSmrg
30201e04c3fSmrg   return ret;
30301e04c3fSmrg}
30401e04c3fSmrg
30501e04c3fSmrgint
30601e04c3fSmrganv_gem_get_param(int fd, uint32_t param)
30701e04c3fSmrg{
30801e04c3fSmrg   int tmp;
30901e04c3fSmrg
31001e04c3fSmrg   drm_i915_getparam_t gp = {
31101e04c3fSmrg      .param = param,
31201e04c3fSmrg      .value = &tmp,
31301e04c3fSmrg   };
31401e04c3fSmrg
3157ec681f3Smrg   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
31601e04c3fSmrg   if (ret == 0)
31701e04c3fSmrg      return tmp;
31801e04c3fSmrg
31901e04c3fSmrg   return 0;
32001e04c3fSmrg}
32101e04c3fSmrg
3227ec681f3Smrguint64_t
3237ec681f3Smrganv_gem_get_drm_cap(int fd, uint32_t capability)
3247ec681f3Smrg{
3257ec681f3Smrg   struct drm_get_cap cap = {
3267ec681f3Smrg      .capability = capability,
3277ec681f3Smrg   };
3287ec681f3Smrg
3297ec681f3Smrg   intel_ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
3307ec681f3Smrg   return cap.value;
3317ec681f3Smrg}
3327ec681f3Smrg
33301e04c3fSmrgbool
33401e04c3fSmrganv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
33501e04c3fSmrg{
33601e04c3fSmrg   struct drm_gem_close close;
33701e04c3fSmrg   int ret;
33801e04c3fSmrg
33901e04c3fSmrg   struct drm_i915_gem_create gem_create = {
34001e04c3fSmrg      .size = 4096,
34101e04c3fSmrg   };
34201e04c3fSmrg
3437ec681f3Smrg   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
34401e04c3fSmrg      assert(!"Failed to create GEM BO");
34501e04c3fSmrg      return false;
34601e04c3fSmrg   }
34701e04c3fSmrg
34801e04c3fSmrg   bool swizzled = false;
34901e04c3fSmrg
35001e04c3fSmrg   /* set_tiling overwrites the input on the error path, so we have to open
3517ec681f3Smrg    * code intel_ioctl.
35201e04c3fSmrg    */
35301e04c3fSmrg   do {
35401e04c3fSmrg      struct drm_i915_gem_set_tiling set_tiling = {
35501e04c3fSmrg         .handle = gem_create.handle,
35601e04c3fSmrg         .tiling_mode = tiling,
35701e04c3fSmrg         .stride = tiling == I915_TILING_X ? 512 : 128,
35801e04c3fSmrg      };
35901e04c3fSmrg
36001e04c3fSmrg      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
36101e04c3fSmrg   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
36201e04c3fSmrg
36301e04c3fSmrg   if (ret != 0) {
36401e04c3fSmrg      assert(!"Failed to set BO tiling");
36501e04c3fSmrg      goto close_and_return;
36601e04c3fSmrg   }
36701e04c3fSmrg
36801e04c3fSmrg   struct drm_i915_gem_get_tiling get_tiling = {
36901e04c3fSmrg      .handle = gem_create.handle,
37001e04c3fSmrg   };
37101e04c3fSmrg
3727ec681f3Smrg   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
37301e04c3fSmrg      assert(!"Failed to get BO tiling");
37401e04c3fSmrg      goto close_and_return;
37501e04c3fSmrg   }
37601e04c3fSmrg
37701e04c3fSmrg   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;
37801e04c3fSmrg
37901e04c3fSmrgclose_and_return:
38001e04c3fSmrg
38101e04c3fSmrg   memset(&close, 0, sizeof(close));
38201e04c3fSmrg   close.handle = gem_create.handle;
3837ec681f3Smrg   intel_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
38401e04c3fSmrg
38501e04c3fSmrg   return swizzled;
38601e04c3fSmrg}
38701e04c3fSmrg
38801e04c3fSmrgbool
38901e04c3fSmrganv_gem_has_context_priority(int fd)
39001e04c3fSmrg{
39101e04c3fSmrg   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
3927ec681f3Smrg                                     INTEL_CONTEXT_MEDIUM_PRIORITY);
39301e04c3fSmrg}
39401e04c3fSmrg
39501e04c3fSmrgint
39601e04c3fSmrganv_gem_create_context(struct anv_device *device)
39701e04c3fSmrg{
39801e04c3fSmrg   struct drm_i915_gem_context_create create = { 0 };
39901e04c3fSmrg
4007ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
4017ec681f3Smrg   if (ret == -1)
4027ec681f3Smrg      return -1;
4037ec681f3Smrg
4047ec681f3Smrg   return create.ctx_id;
4057ec681f3Smrg}
4067ec681f3Smrg
4077ec681f3Smrgint
4087ec681f3Smrganv_gem_create_context_engines(struct anv_device *device,
4097ec681f3Smrg                               const struct drm_i915_query_engine_info *info,
4107ec681f3Smrg                               int num_engines, uint16_t *engine_classes)
4117ec681f3Smrg{
4127ec681f3Smrg   const size_t engine_inst_sz = 2 * sizeof(__u16); /* 1 class, 1 instance */
4137ec681f3Smrg   const size_t engines_param_size =
4147ec681f3Smrg      sizeof(__u64) /* extensions */ + num_engines * engine_inst_sz;
4157ec681f3Smrg
4167ec681f3Smrg   void *engines_param = malloc(engines_param_size);
4177ec681f3Smrg   assert(engines_param);
4187ec681f3Smrg   *(__u64*)engines_param = 0;
4197ec681f3Smrg   __u16 *class_inst_ptr = (__u16*)(((__u64*)engines_param) + 1);
4207ec681f3Smrg
4217ec681f3Smrg   /* For each type of drm_i915_gem_engine_class of interest, we keep track of
4227ec681f3Smrg    * the previous engine instance used.
4237ec681f3Smrg    */
4247ec681f3Smrg   int last_engine_idx[] = {
4257ec681f3Smrg      [I915_ENGINE_CLASS_RENDER] = -1,
4267ec681f3Smrg   };
4277ec681f3Smrg
4287ec681f3Smrg   int i915_engine_counts[] = {
4297ec681f3Smrg      [I915_ENGINE_CLASS_RENDER] =
4307ec681f3Smrg         anv_gem_count_engines(info, I915_ENGINE_CLASS_RENDER),
4317ec681f3Smrg   };
4327ec681f3Smrg
4337ec681f3Smrg   /* For each queue, we look for the next instance that matches the class we
4347ec681f3Smrg    * need.
4357ec681f3Smrg    */
4367ec681f3Smrg   for (int i = 0; i < num_engines; i++) {
4377ec681f3Smrg      uint16_t engine_class = engine_classes[i];
4387ec681f3Smrg      if (i915_engine_counts[engine_class] <= 0) {
4397ec681f3Smrg         free(engines_param);
4407ec681f3Smrg         return -1;
4417ec681f3Smrg      }
4427ec681f3Smrg
4437ec681f3Smrg      /* Run through the engines reported by the kernel looking for the next
4447ec681f3Smrg       * matching instance. We loop in case we want to create multiple
4457ec681f3Smrg       * contexts on an engine instance.
4467ec681f3Smrg       */
4477ec681f3Smrg      int engine_instance = -1;
4487ec681f3Smrg      for (int i = 0; i < info->num_engines; i++) {
4497ec681f3Smrg         int *idx = &last_engine_idx[engine_class];
4507ec681f3Smrg         if (++(*idx) >= info->num_engines)
4517ec681f3Smrg            *idx = 0;
4527ec681f3Smrg         if (info->engines[*idx].engine.engine_class == engine_class) {
4537ec681f3Smrg            engine_instance = info->engines[*idx].engine.engine_instance;
4547ec681f3Smrg            break;
4557ec681f3Smrg         }
4567ec681f3Smrg      }
4577ec681f3Smrg      if (engine_instance < 0) {
4587ec681f3Smrg         free(engines_param);
4597ec681f3Smrg         return -1;
4607ec681f3Smrg      }
4617ec681f3Smrg
4627ec681f3Smrg      *class_inst_ptr++ = engine_class;
4637ec681f3Smrg      *class_inst_ptr++ = engine_instance;
4647ec681f3Smrg   }
4657ec681f3Smrg
4667ec681f3Smrg   assert((uintptr_t)engines_param + engines_param_size ==
4677ec681f3Smrg          (uintptr_t)class_inst_ptr);
4687ec681f3Smrg
4697ec681f3Smrg   struct drm_i915_gem_context_create_ext_setparam set_engines = {
4707ec681f3Smrg      .base = {
4717ec681f3Smrg         .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
4727ec681f3Smrg      },
4737ec681f3Smrg      .param = {
4747ec681f3Smrg	 .param = I915_CONTEXT_PARAM_ENGINES,
4757ec681f3Smrg         .value = (uintptr_t)engines_param,
4767ec681f3Smrg         .size = engines_param_size,
4777ec681f3Smrg      }
4787ec681f3Smrg   };
4797ec681f3Smrg   struct drm_i915_gem_context_create_ext create = {
4807ec681f3Smrg      .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
4817ec681f3Smrg      .extensions = (uintptr_t)&set_engines,
4827ec681f3Smrg   };
4837ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
4847ec681f3Smrg   free(engines_param);
48501e04c3fSmrg   if (ret == -1)
48601e04c3fSmrg      return -1;
48701e04c3fSmrg
48801e04c3fSmrg   return create.ctx_id;
48901e04c3fSmrg}
49001e04c3fSmrg
49101e04c3fSmrgint
49201e04c3fSmrganv_gem_destroy_context(struct anv_device *device, int context)
49301e04c3fSmrg{
49401e04c3fSmrg   struct drm_i915_gem_context_destroy destroy = {
49501e04c3fSmrg      .ctx_id = context,
49601e04c3fSmrg   };
49701e04c3fSmrg
4987ec681f3Smrg   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
49901e04c3fSmrg}
50001e04c3fSmrg
50101e04c3fSmrgint
50201e04c3fSmrganv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
50301e04c3fSmrg{
50401e04c3fSmrg   struct drm_i915_gem_context_param p = {
50501e04c3fSmrg      .ctx_id = context,
50601e04c3fSmrg      .param = param,
50701e04c3fSmrg      .value = value,
50801e04c3fSmrg   };
50901e04c3fSmrg   int err = 0;
51001e04c3fSmrg
5117ec681f3Smrg   if (intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
51201e04c3fSmrg      err = -errno;
51301e04c3fSmrg   return err;
51401e04c3fSmrg}
51501e04c3fSmrg
51601e04c3fSmrgint
51701e04c3fSmrganv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
51801e04c3fSmrg{
51901e04c3fSmrg   struct drm_i915_gem_context_param gp = {
52001e04c3fSmrg      .ctx_id = context,
52101e04c3fSmrg      .param = param,
52201e04c3fSmrg   };
52301e04c3fSmrg
5247ec681f3Smrg   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
52501e04c3fSmrg   if (ret == -1)
52601e04c3fSmrg      return -1;
52701e04c3fSmrg
52801e04c3fSmrg   *value = gp.value;
52901e04c3fSmrg   return 0;
53001e04c3fSmrg}
53101e04c3fSmrg
53201e04c3fSmrgint
5337ec681f3Smrganv_gem_context_get_reset_stats(int fd, int context,
5347ec681f3Smrg                                uint32_t *active, uint32_t *pending)
53501e04c3fSmrg{
53601e04c3fSmrg   struct drm_i915_reset_stats stats = {
5377ec681f3Smrg      .ctx_id = context,
53801e04c3fSmrg   };
53901e04c3fSmrg
5407ec681f3Smrg   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
54101e04c3fSmrg   if (ret == 0) {
54201e04c3fSmrg      *active = stats.batch_active;
54301e04c3fSmrg      *pending = stats.batch_pending;
54401e04c3fSmrg   }
54501e04c3fSmrg
54601e04c3fSmrg   return ret;
54701e04c3fSmrg}
54801e04c3fSmrg
54901e04c3fSmrgint
55001e04c3fSmrganv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
55101e04c3fSmrg{
55201e04c3fSmrg   struct drm_prime_handle args = {
55301e04c3fSmrg      .handle = gem_handle,
5547ec681f3Smrg      .flags = DRM_CLOEXEC | DRM_RDWR,
55501e04c3fSmrg   };
55601e04c3fSmrg
5577ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
55801e04c3fSmrg   if (ret == -1)
55901e04c3fSmrg      return -1;
56001e04c3fSmrg
56101e04c3fSmrg   return args.fd;
56201e04c3fSmrg}
56301e04c3fSmrg
56401e04c3fSmrguint32_t
56501e04c3fSmrganv_gem_fd_to_handle(struct anv_device *device, int fd)
56601e04c3fSmrg{
56701e04c3fSmrg   struct drm_prime_handle args = {
56801e04c3fSmrg      .fd = fd,
56901e04c3fSmrg   };
57001e04c3fSmrg
5717ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
57201e04c3fSmrg   if (ret == -1)
57301e04c3fSmrg      return 0;
57401e04c3fSmrg
57501e04c3fSmrg   return args.handle;
57601e04c3fSmrg}
57701e04c3fSmrg
57801e04c3fSmrgint
5797ec681f3Smrganv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
58001e04c3fSmrg{
58101e04c3fSmrg   struct drm_i915_reg_read args = {
58201e04c3fSmrg      .offset = offset
58301e04c3fSmrg   };
58401e04c3fSmrg
5857ec681f3Smrg   int ret = intel_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);
58601e04c3fSmrg
58701e04c3fSmrg   *result = args.val;
58801e04c3fSmrg   return ret;
58901e04c3fSmrg}
59001e04c3fSmrg
59101e04c3fSmrgint
59201e04c3fSmrganv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
59301e04c3fSmrg{
59401e04c3fSmrg   struct sync_merge_data args = {
59501e04c3fSmrg      .name = "anv merge fence",
59601e04c3fSmrg      .fd2 = fd2,
59701e04c3fSmrg      .fence = -1,
59801e04c3fSmrg   };
59901e04c3fSmrg
6007ec681f3Smrg   int ret = intel_ioctl(fd1, SYNC_IOC_MERGE, &args);
60101e04c3fSmrg   if (ret == -1)
60201e04c3fSmrg      return -1;
60301e04c3fSmrg
60401e04c3fSmrg   return args.fence;
60501e04c3fSmrg}
60601e04c3fSmrg
60701e04c3fSmrguint32_t
60801e04c3fSmrganv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
60901e04c3fSmrg{
61001e04c3fSmrg   struct drm_syncobj_create args = {
61101e04c3fSmrg      .flags = flags,
61201e04c3fSmrg   };
61301e04c3fSmrg
6147ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
61501e04c3fSmrg   if (ret)
61601e04c3fSmrg      return 0;
61701e04c3fSmrg
61801e04c3fSmrg   return args.handle;
61901e04c3fSmrg}
62001e04c3fSmrg
62101e04c3fSmrgvoid
62201e04c3fSmrganv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
62301e04c3fSmrg{
62401e04c3fSmrg   struct drm_syncobj_destroy args = {
62501e04c3fSmrg      .handle = handle,
62601e04c3fSmrg   };
62701e04c3fSmrg
6287ec681f3Smrg   intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
62901e04c3fSmrg}
63001e04c3fSmrg
63101e04c3fSmrgint
63201e04c3fSmrganv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
63301e04c3fSmrg{
63401e04c3fSmrg   struct drm_syncobj_handle args = {
63501e04c3fSmrg      .handle = handle,
63601e04c3fSmrg   };
63701e04c3fSmrg
6387ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
63901e04c3fSmrg   if (ret)
64001e04c3fSmrg      return -1;
64101e04c3fSmrg
64201e04c3fSmrg   return args.fd;
64301e04c3fSmrg}
64401e04c3fSmrg
64501e04c3fSmrguint32_t
64601e04c3fSmrganv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
64701e04c3fSmrg{
64801e04c3fSmrg   struct drm_syncobj_handle args = {
64901e04c3fSmrg      .fd = fd,
65001e04c3fSmrg   };
65101e04c3fSmrg
6527ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
65301e04c3fSmrg   if (ret)
65401e04c3fSmrg      return 0;
65501e04c3fSmrg
65601e04c3fSmrg   return args.handle;
65701e04c3fSmrg}
65801e04c3fSmrg
65901e04c3fSmrgint
66001e04c3fSmrganv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
66101e04c3fSmrg{
66201e04c3fSmrg   struct drm_syncobj_handle args = {
66301e04c3fSmrg      .handle = handle,
66401e04c3fSmrg      .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
66501e04c3fSmrg   };
66601e04c3fSmrg
6677ec681f3Smrg   int ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
66801e04c3fSmrg   if (ret)
66901e04c3fSmrg      return -1;
67001e04c3fSmrg
67101e04c3fSmrg   return args.fd;
67201e04c3fSmrg}
67301e04c3fSmrg
67401e04c3fSmrgint
67501e04c3fSmrganv_gem_syncobj_import_sync_file(struct anv_device *device,
67601e04c3fSmrg                                 uint32_t handle, int fd)
67701e04c3fSmrg{
67801e04c3fSmrg   struct drm_syncobj_handle args = {
67901e04c3fSmrg      .handle = handle,
68001e04c3fSmrg      .fd = fd,
68101e04c3fSmrg      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
68201e04c3fSmrg   };
68301e04c3fSmrg
6847ec681f3Smrg   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
68501e04c3fSmrg}
68601e04c3fSmrg
68701e04c3fSmrgvoid
68801e04c3fSmrganv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
68901e04c3fSmrg{
69001e04c3fSmrg   struct drm_syncobj_array args = {
69101e04c3fSmrg      .handles = (uint64_t)(uintptr_t)&handle,
69201e04c3fSmrg      .count_handles = 1,
69301e04c3fSmrg   };
69401e04c3fSmrg
6957ec681f3Smrg   intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
69601e04c3fSmrg}
69701e04c3fSmrg
/* Thin pass-through to the shared helper in common/intel_gem. */
bool
anv_gem_supports_syncobj_wait(int fd)
{
   return intel_gem_supports_syncobj_wait(fd);
}
70301e04c3fSmrg
7047ec681f3Smrgint
7057ec681f3Smrganv_gem_syncobj_wait(struct anv_device *device,
7067ec681f3Smrg                     const uint32_t *handles, uint32_t num_handles,
7077ec681f3Smrg                     int64_t abs_timeout_ns, bool wait_all)
7087ec681f3Smrg{
7097ec681f3Smrg   struct drm_syncobj_wait args = {
7107ec681f3Smrg      .handles = (uint64_t)(uintptr_t)handles,
7117ec681f3Smrg      .count_handles = num_handles,
7127ec681f3Smrg      .timeout_nsec = abs_timeout_ns,
71301e04c3fSmrg      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
71401e04c3fSmrg   };
71501e04c3fSmrg
7167ec681f3Smrg   if (wait_all)
7177ec681f3Smrg      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
71801e04c3fSmrg
7197ec681f3Smrg   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
72001e04c3fSmrg}
72101e04c3fSmrg
72201e04c3fSmrgint
7237ec681f3Smrganv_gem_syncobj_timeline_wait(struct anv_device *device,
7247ec681f3Smrg                              const uint32_t *handles, const uint64_t *points,
7257ec681f3Smrg                              uint32_t num_items, int64_t abs_timeout_ns,
7267ec681f3Smrg                              bool wait_all, bool wait_materialize)
72701e04c3fSmrg{
7287ec681f3Smrg   assert(device->physical->has_syncobj_wait_available);
7297ec681f3Smrg
7307ec681f3Smrg   struct drm_syncobj_timeline_wait args = {
73101e04c3fSmrg      .handles = (uint64_t)(uintptr_t)handles,
7327ec681f3Smrg      .points = (uint64_t)(uintptr_t)points,
7337ec681f3Smrg      .count_handles = num_items,
73401e04c3fSmrg      .timeout_nsec = abs_timeout_ns,
73501e04c3fSmrg      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
73601e04c3fSmrg   };
73701e04c3fSmrg
73801e04c3fSmrg   if (wait_all)
73901e04c3fSmrg      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
7407ec681f3Smrg   if (wait_materialize)
7417ec681f3Smrg      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;
7427ec681f3Smrg
7437ec681f3Smrg   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);
7447ec681f3Smrg}
7457ec681f3Smrg
7467ec681f3Smrgint
7477ec681f3Smrganv_gem_syncobj_timeline_signal(struct anv_device *device,
7487ec681f3Smrg                                const uint32_t *handles, const uint64_t *points,
7497ec681f3Smrg                                uint32_t num_items)
7507ec681f3Smrg{
7517ec681f3Smrg   assert(device->physical->has_syncobj_wait_available);
7527ec681f3Smrg
7537ec681f3Smrg   struct drm_syncobj_timeline_array args = {
7547ec681f3Smrg      .handles = (uint64_t)(uintptr_t)handles,
7557ec681f3Smrg      .points = (uint64_t)(uintptr_t)points,
7567ec681f3Smrg      .count_handles = num_items,
7577ec681f3Smrg   };
7587ec681f3Smrg
7597ec681f3Smrg   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
7607ec681f3Smrg}
7617ec681f3Smrg
7627ec681f3Smrgint
7637ec681f3Smrganv_gem_syncobj_timeline_query(struct anv_device *device,
7647ec681f3Smrg                               const uint32_t *handles, uint64_t *points,
7657ec681f3Smrg                               uint32_t num_items)
7667ec681f3Smrg{
7677ec681f3Smrg   assert(device->physical->has_syncobj_wait_available);
7687ec681f3Smrg
7697ec681f3Smrg   struct drm_syncobj_timeline_array args = {
7707ec681f3Smrg      .handles = (uint64_t)(uintptr_t)handles,
7717ec681f3Smrg      .points = (uint64_t)(uintptr_t)points,
7727ec681f3Smrg      .count_handles = num_items,
7737ec681f3Smrg   };
77401e04c3fSmrg
7757ec681f3Smrg   return intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
7767ec681f3Smrg}
7777ec681f3Smrg
7787ec681f3Smrgstruct drm_i915_query_engine_info *
7797ec681f3Smrganv_gem_get_engine_info(int fd)
7807ec681f3Smrg{
7817ec681f3Smrg   return intel_i915_query_alloc(fd, DRM_I915_QUERY_ENGINE_INFO);
7827ec681f3Smrg}
7837ec681f3Smrg
7847ec681f3Smrgint
7857ec681f3Smrganv_gem_count_engines(const struct drm_i915_query_engine_info *info,
7867ec681f3Smrg                      uint16_t engine_class)
7877ec681f3Smrg{
7887ec681f3Smrg   int count = 0;
7897ec681f3Smrg   for (int i = 0; i < info->num_engines; i++) {
7907ec681f3Smrg      if (info->engines[i].engine.engine_class == engine_class)
7917ec681f3Smrg         count++;
7927ec681f3Smrg   }
7937ec681f3Smrg   return count;
79401e04c3fSmrg}
795