intel_bufmgr_gem.c revision 6260e5d5
/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>

#include "errno.h"
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "string.h"

#include "i915_drm.h"
#include "uthash.h"

#if HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define memclear(s) memset(&s, 0, sizeof(s))

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define MAX2(A, B) ((A) > (B) ? (A) : (B))

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((__u32)(n))
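
/*
 * Illustrative example (not part of the original file): these helpers are
 * used by the validation-list dump below to print a 64-bit offset with two
 * 32-bit format specifiers, e.g.:
 *
 *	uint64_t offset = 0x0000000123456000ull;
 *	printf("0x%08x %08x\n", upper_32_bits(offset), lower_32_bits(offset));
 *
 * which prints "0x00000001 23456000".
 */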

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	atomic_t refcount;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	drmMMListHead managers;

	drm_intel_bo_gem *name_table;
	drm_intel_bo_gem *handle_table;

	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int has_llc : 1;
	unsigned int has_wait_timeout : 1;
	unsigned int bo_reuse : 1;
	unsigned int no_exec : 1;
	unsigned int has_vebox : 1;
	unsigned int has_exec_async : 1;
	bool fenced_relocs;

	struct {
		void *ptr;
		uint32_t handle;
	} userptr_active;

} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 *
	 * List contains both flink named and prime fd'd objects
	 */
	unsigned int global_name;

	UT_hash_handle handle_hh;
	UT_hash_handle name_hh;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	unsigned long kflags;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Array of BOs that are referenced by this buffer and will be softpinned */
	drm_intel_bo **softpin_target;
	/** Number of softpinned BOs that are referenced by this buffer */
	int softpin_target_count;
	/** Maximum number of softpinned BOs that are referenced by this buffer */
	int softpin_target_size;

	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	/** WC CPU address for the buffer, saved across map/unmap cycles */
	void *wc_virtual;
	/**
	 * Virtual address of the buffer allocated by user, used for userptr
	 * objects only.
	 */
	void *user_virtual;
	int map_count;
	drmMMListHead vma_list;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building
	 * the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Boolean of whether the GPU is definitely not accessing the buffer.
	 *
	 * This is only valid when reusable, since non-reusable
	 * buffers are those that have been shared with other
	 * processes, so we don't know their state.
	 */
	bool idle;

	/**
	 * Boolean of whether this buffer was allocated with userptr
	 */
	bool is_userptr;

	/**
	 * Size in bytes of this buffer and its relocation descendents.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Flag of whether we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
{
	return (drm_intel_bo_gem *)bo;
}

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}
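
/*
 * Worked example (illustrative sketch, not part of the original file): on
 * gen3 a 1300KB tiled request rounds up through the loop above to the next
 * power of two of at least 1MB, i.e. 2MB; with relaxed fencing it would
 * instead be rounded only to the next 4096-byte page, and on gen4+ every
 * tiled size is simply page-aligned.
 */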

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
	    || (IS_915(bufmgr_gem->pci_device)
		&& *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fall back to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}
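
/*
 * Illustrative note (an assumption about code outside this excerpt): the
 * bucket array is filled in at bufmgr init time (init_cache_buckets(),
 * later in this file) with sizes 4KB, 8KB, 12KB, 16KB and then four
 * buckets per power of two (20KB, 24KB, 28KB, 32KB, 40KB, ...), so the
 * linear scan above returns the smallest bucket that covers the request.
 */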
"*" : "", 42522944501Smrg bo_gem->name); 42622944501Smrg continue; 42722944501Smrg } 42822944501Smrg 42922944501Smrg for (j = 0; j < bo_gem->reloc_count; j++) { 43022944501Smrg drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo; 43122944501Smrg drm_intel_bo_gem *target_gem = 43222944501Smrg (drm_intel_bo_gem *) target_bo; 43322944501Smrg 434fe517fc9Smrg DBG("%2d: %d %s(%s)@0x%08x %08x -> " 435fe517fc9Smrg "%d (%s)@0x%08x %08x + 0x%08x\n", 43622944501Smrg i, 437fe517fc9Smrg bo_gem->gem_handle, 4380655efefSmrg bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "", 439fe517fc9Smrg bo_gem->name, 440fe517fc9Smrg upper_32_bits(bo_gem->relocs[j].offset), 441fe517fc9Smrg lower_32_bits(bo_gem->relocs[j].offset), 44222944501Smrg target_gem->gem_handle, 44322944501Smrg target_gem->name, 444fe517fc9Smrg upper_32_bits(target_bo->offset64), 445fe517fc9Smrg lower_32_bits(target_bo->offset64), 44622944501Smrg bo_gem->relocs[j].delta); 44722944501Smrg } 448fe517fc9Smrg 449fe517fc9Smrg for (j = 0; j < bo_gem->softpin_target_count; j++) { 450fe517fc9Smrg drm_intel_bo *target_bo = bo_gem->softpin_target[j]; 451fe517fc9Smrg drm_intel_bo_gem *target_gem = 452fe517fc9Smrg (drm_intel_bo_gem *) target_bo; 453fe517fc9Smrg DBG("%2d: %d %s(%s) -> " 454fe517fc9Smrg "%d *(%s)@0x%08x %08x\n", 455fe517fc9Smrg i, 456fe517fc9Smrg bo_gem->gem_handle, 4570655efefSmrg bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "", 458fe517fc9Smrg bo_gem->name, 459fe517fc9Smrg target_gem->gem_handle, 460fe517fc9Smrg target_gem->name, 461fe517fc9Smrg upper_32_bits(target_bo->offset64), 462fe517fc9Smrg lower_32_bits(target_bo->offset64)); 463fe517fc9Smrg } 46422944501Smrg } 46522944501Smrg} 46622944501Smrg 46722944501Smrgstatic inline void 46822944501Smrgdrm_intel_gem_bo_reference(drm_intel_bo *bo) 46922944501Smrg{ 47022944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 47122944501Smrg 47222944501Smrg atomic_inc(&bo_gem->refcount); 47322944501Smrg} 47422944501Smrg 47522944501Smrg/** 47622944501Smrg * Adds the given buffer to the list of buffers to be validated (moved into the 47722944501Smrg * appropriate memory type) with the next batch submission. 47822944501Smrg * 47922944501Smrg * If a buffer is validated multiple times in a batch submission, it ends up 48022944501Smrg * with the intersection of the memory type flags and the union of the 48122944501Smrg * access flags. 48222944501Smrg */ 48322944501Smrgstatic void 48422944501Smrgdrm_intel_add_validate_buffer(drm_intel_bo *bo) 48522944501Smrg{ 48622944501Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 48722944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 48822944501Smrg int index; 48922944501Smrg 49022944501Smrg if (bo_gem->validate_index != -1) 49122944501Smrg return; 49222944501Smrg 49322944501Smrg /* Extend the array of validation entries as necessary. 

static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;
	unsigned long flags;

	flags = 0;
	if (need_fence)
		flags |= EXEC_OBJECT_NEEDS_FENCE;

	if (bo_gem->validate_index != -1) {
		bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = bo->align;
	bufmgr_gem->exec2_objects[index].offset = bo->offset64;
	bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags | flags;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}
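
/*
 * Illustrative note (not part of the original file): both validation-list
 * builders above grow their arrays geometrically, doubling from an initial
 * five entries, so adding N buffers to a batch costs O(N) amortized
 * realloc work instead of O(N^2).
 */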

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem,
				      unsigned int alignment)
{
	unsigned int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		unsigned int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		alignment = MAX2(alignment, min_size);
	}

	bo_gem->reloc_tree_size = size + alignment;
}
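
/*
 * Worked example (illustrative sketch, not part of the original file): a
 * 1MB X-tiled buffer on gen3 with relaxed fencing and no explicit
 * alignment gets min_size = 1MB, so reloc_tree_size = size + alignment =
 * 2MB -- the worst-case hole needed for the buffer to fit in the aperture.
 */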

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free (bo_gem->relocs);
		bo_gem->relocs = NULL;

		free (bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	if (bo_gem->reusable && bo_gem->idle)
		return false;

	memclear(busy);
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	if (ret == 0) {
		bo_gem->idle = !busy.busy;
		return busy.busy;
	} else {
		return false;
	}
}

static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	memclear(madv);
	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}
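
/*
 * Usage sketch (illustrative, not part of the original file), using the
 * public wrappers for the two ioctls above:
 *
 *	if (!drm_intel_bo_busy(bo)) {
 *		// the GPU is done with bo; safe to reuse its storage
 *	}
 *	// Tell the kernel the pages may be reclaimed; a later WILLNEED
 *	// madvise returns 0 (not retained) if they were actually purged.
 *	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
 */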

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride,
				unsigned int alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
			bo_gem->bo.align = alignment;
		} else {
			assert(alignment == 0);
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			goto err;

		/* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
		   list (vma_list), so better set the list head here */
		DRMINITLISTHEAD(&bo_gem->vma_list);

		bo_gem->bo.size = bo_size;

		memclear(create);
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		if (ret != 0) {
			free(bo_gem);
			goto err;
		}

		bo_gem->gem_handle = create.handle;
		HASH_ADD(handle_hh, bufmgr_gem->handle_table,
			 gem_handle, sizeof(bo_gem->gem_handle),
			 bo_gem);

		bo_gem->bo.handle = bo_gem->gem_handle;
		bo_gem->bo.bufmgr = bufmgr;
		bo_gem->bo.align = alignment;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride))
			goto err_free;
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;

err_free:
	drm_intel_gem_bo_free(&bo_gem->bo);
err:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}
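
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 8192, 0);
 *	...
 *	drm_intel_bo_unreference(bo);
 *
 * With reuse enabled, the 8192-byte request maps to the 8KB cache bucket,
 * so a later allocation of the same size is typically served from the
 * cache without another DRM_IOCTL_I915_GEM_CREATE round trip.
 */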

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0,
					       alignment);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault.  This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride, 0);
}
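
/*
 * Worked example (illustrative sketch, not part of the original file): a
 * 512x512 surface with cpp = 4 and X tiling on gen4+ gives stride =
 * 512 * 4 = 2048 bytes (already a multiple of the 512-byte tile width),
 * height aligned to 8 rows, so size = 2048 * 512 = 1MB, which is already
 * page-aligned; *pitch is returned as 2048.
 */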
901aaba2545Smrg */ 902aaba2545Smrg aligned_y = y; 90320131375Smrg height_alignment = 2; 90420131375Smrg 90520131375Smrg if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE) 90620131375Smrg height_alignment = 16; 90720131375Smrg else if (tiling == I915_TILING_X 90820131375Smrg || (IS_915(bufmgr_gem->pci_device) 90920131375Smrg && tiling == I915_TILING_Y)) 91020131375Smrg height_alignment = 8; 911aaba2545Smrg else if (tiling == I915_TILING_Y) 91220131375Smrg height_alignment = 32; 91320131375Smrg aligned_y = ALIGN(y, height_alignment); 914aaba2545Smrg 915aaba2545Smrg stride = x * cpp; 9166d98c517Smrg stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode); 917aaba2545Smrg size = stride * aligned_y; 918aaba2545Smrg size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode); 919aaba2545Smrg } while (*tiling_mode != tiling); 92022944501Smrg *pitch = stride; 92122944501Smrg 9226d98c517Smrg if (tiling == I915_TILING_NONE) 9236d98c517Smrg stride = 0; 9246d98c517Smrg 9256d98c517Smrg return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags, 926fe517fc9Smrg tiling, stride, 0); 92722944501Smrg} 92822944501Smrg 929a884aba1Smrgstatic drm_intel_bo * 930a884aba1Smrgdrm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr, 931a884aba1Smrg const char *name, 932a884aba1Smrg void *addr, 933a884aba1Smrg uint32_t tiling_mode, 934a884aba1Smrg uint32_t stride, 935a884aba1Smrg unsigned long size, 936a884aba1Smrg unsigned long flags) 937a884aba1Smrg{ 938a884aba1Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; 939a884aba1Smrg drm_intel_bo_gem *bo_gem; 940a884aba1Smrg int ret; 941a884aba1Smrg struct drm_i915_gem_userptr userptr; 942a884aba1Smrg 943a884aba1Smrg /* Tiling with userptr surfaces is not supported 944a884aba1Smrg * on all hardware so refuse it for time being. 
945a884aba1Smrg */ 946a884aba1Smrg if (tiling_mode != I915_TILING_NONE) 947a884aba1Smrg return NULL; 948a884aba1Smrg 949a884aba1Smrg bo_gem = calloc(1, sizeof(*bo_gem)); 950a884aba1Smrg if (!bo_gem) 951a884aba1Smrg return NULL; 952a884aba1Smrg 9532ee35494Smrg atomic_set(&bo_gem->refcount, 1); 9542ee35494Smrg DRMINITLISTHEAD(&bo_gem->vma_list); 9552ee35494Smrg 956a884aba1Smrg bo_gem->bo.size = size; 957a884aba1Smrg 958424e9256Smrg memclear(userptr); 959a884aba1Smrg userptr.user_ptr = (__u64)((unsigned long)addr); 960a884aba1Smrg userptr.user_size = size; 961a884aba1Smrg userptr.flags = flags; 962a884aba1Smrg 963a884aba1Smrg ret = drmIoctl(bufmgr_gem->fd, 964a884aba1Smrg DRM_IOCTL_I915_GEM_USERPTR, 965a884aba1Smrg &userptr); 966a884aba1Smrg if (ret != 0) { 967a884aba1Smrg DBG("bo_create_userptr: " 968a884aba1Smrg "ioctl failed with user ptr %p size 0x%lx, " 969a884aba1Smrg "user flags 0x%lx\n", addr, size, flags); 970a884aba1Smrg free(bo_gem); 971a884aba1Smrg return NULL; 972a884aba1Smrg } 973a884aba1Smrg 9742ee35494Smrg pthread_mutex_lock(&bufmgr_gem->lock); 9752ee35494Smrg 976a884aba1Smrg bo_gem->gem_handle = userptr.handle; 977a884aba1Smrg bo_gem->bo.handle = bo_gem->gem_handle; 978a884aba1Smrg bo_gem->bo.bufmgr = bufmgr; 979a884aba1Smrg bo_gem->is_userptr = true; 980a884aba1Smrg bo_gem->bo.virtual = addr; 981a884aba1Smrg /* Save the address provided by user */ 982a884aba1Smrg bo_gem->user_virtual = addr; 983a884aba1Smrg bo_gem->tiling_mode = I915_TILING_NONE; 984a884aba1Smrg bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 985a884aba1Smrg bo_gem->stride = 0; 986a884aba1Smrg 9872ee35494Smrg HASH_ADD(handle_hh, bufmgr_gem->handle_table, 9882ee35494Smrg gem_handle, sizeof(bo_gem->gem_handle), 9892ee35494Smrg bo_gem); 990a884aba1Smrg 991a884aba1Smrg bo_gem->name = name; 992a884aba1Smrg bo_gem->validate_index = -1; 993a884aba1Smrg bo_gem->reloc_tree_fences = 0; 994a884aba1Smrg bo_gem->used_as_reloc_target = false; 995a884aba1Smrg bo_gem->has_error = false; 996a884aba1Smrg bo_gem->reusable = false; 997a884aba1Smrg 998fe517fc9Smrg drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0); 9992ee35494Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 1000a884aba1Smrg 1001a884aba1Smrg DBG("bo_create_userptr: " 1002a884aba1Smrg "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n", 1003a884aba1Smrg addr, bo_gem->gem_handle, bo_gem->name, 1004a884aba1Smrg size, stride, tiling_mode); 1005a884aba1Smrg 1006a884aba1Smrg return &bo_gem->bo; 1007a884aba1Smrg} 1008a884aba1Smrg 1009424e9256Smrgstatic bool 1010424e9256Smrghas_userptr(drm_intel_bufmgr_gem *bufmgr_gem) 1011424e9256Smrg{ 1012424e9256Smrg int ret; 1013424e9256Smrg void *ptr; 1014424e9256Smrg long pgsz; 1015424e9256Smrg struct drm_i915_gem_userptr userptr; 1016424e9256Smrg 1017424e9256Smrg pgsz = sysconf(_SC_PAGESIZE); 1018424e9256Smrg assert(pgsz > 0); 1019424e9256Smrg 1020424e9256Smrg ret = posix_memalign(&ptr, pgsz, pgsz); 1021424e9256Smrg if (ret) { 1022424e9256Smrg DBG("Failed to get a page (%ld) for userptr detection!\n", 1023424e9256Smrg pgsz); 1024424e9256Smrg return false; 1025424e9256Smrg } 1026424e9256Smrg 1027424e9256Smrg memclear(userptr); 1028424e9256Smrg userptr.user_ptr = (__u64)(unsigned long)ptr; 1029424e9256Smrg userptr.user_size = pgsz; 1030424e9256Smrg 1031424e9256Smrgretry: 1032424e9256Smrg ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr); 1033424e9256Smrg if (ret) { 1034424e9256Smrg if (errno == ENODEV && userptr.flags == 0) { 1035424e9256Smrg userptr.flags = I915_USERPTR_UNSYNCHRONIZED; 

static drm_intel_bo *
check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       void *addr,
		       uint32_t tiling_mode,
		       uint32_t stride,
		       unsigned long size,
		       unsigned long flags)
{
	if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
		bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
	else
		bufmgr->bo_alloc_userptr = NULL;

	return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
					  tiling_mode, stride, size, flags);
}
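
/*
 * Design note (illustrative, not part of the original file): the bufmgr's
 * bo_alloc_userptr hook initially points at this probe.  The first call
 * detects kernel userptr support with a throwaway page and then rewrites
 * the hook, so subsequent drm_intel_bo_alloc_userptr() calls dispatch
 * straight to drm_intel_gem_bo_alloc_userptr(), or fail cleanly with NULL
 * if the kernel lacks support.
 */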

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;

	/* At the moment most applications only have a few named bo.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named. And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	pthread_mutex_lock(&bufmgr_gem->lock);
	HASH_FIND(name_hh, bufmgr_gem->name_table,
		  &handle, sizeof(handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	memclear(open_arg);
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		bo_gem = NULL;
		goto out;
	}
	/* Now see if someone has used a prime handle to get this
	 * object from the kernel before by looking through the list
	 * again for a matching gem_handle
	 */
	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
		  &open_arg.handle, sizeof(open_arg.handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		goto out;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.offset64 = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
	HASH_ADD(name_hh, bufmgr_gem->name_table,
		 global_name, sizeof(bo_gem->global_name), bo_gem);

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0)
		goto err_unref;

	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

out:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return &bo_gem->bo;

err_unref:
	drm_intel_gem_bo_free(&bo_gem->bo);
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}
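
/*
 * Usage sketch (illustrative, not part of the original file): sharing a
 * buffer between two processes with flink names:
 *
 *	uint32_t name;
 *	drm_intel_bo_flink(bo, &name);		// exporting process
 *	...
 *	drm_intel_bo *shared =			// importing process
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 *
 * The hash lookups above make repeated imports of the same name return
 * the same drm_intel_bo with its reference count bumped.
 */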

static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->wc_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
		drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	if (bo_gem->global_name)
		HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
	HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);

	/* Close this object */
	memclear(close);
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

static void
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
{
#if HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

	if (bo_gem->wc_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);

	if (bo_gem->gtt_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}

static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bufmgr_gem->vma_cache.next,
				      vma_list);
		assert(bo_gem->map_count == 0);
		DRMLISTDELINIT(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
			drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->wc_virtual) {
			drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
			bo_gem->wc_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}

static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				       drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
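
/*
 * Illustrative note (not part of the original file): open_vma/close_vma
 * bracket the map_count 0<->1 transitions.  Actively mapped buffers count
 * against vma_open; buffers whose cached mappings are merely being kept
 * around sit on vma_cache and count against vma_count, which
 * purge_vma_cache() trims to vma_max - 2*vma_open so that creating new
 * mmaps cannot exhaust the process's VMA budget.
 */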

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	for (i = 0; i < bo_gem->softpin_target_count; i++)
		drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
							  time);
	bo_gem->kflags = 0;
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->softpin_target_count = 0;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}
	if (bo_gem->softpin_target) {
		free(bo_gem->softpin_target);
		bo_gem->softpin_target = NULL;
		bo_gem->softpin_target_size = 0;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	}

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);

	if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);

		if (atomic_dec_and_test(&bo_gem->refcount)) {
			drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
			drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
		}

		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	if (bo_gem->is_userptr) {
		/* Return the same user ptr */
		bo->virtual = bo_gem->user_virtual;
		return 0;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	if (write_enable)
		bo_gem->mapped_cpu_write = true;

	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
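
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {	// writable CPU mapping
 *		memset(bo->virtual, 0, bo->size);
 *		drm_intel_bo_unmap(bo);
 *	}
 *
 * The mmap is cached in mem_virtual across map/unmap cycles, so only the
 * set-domain ioctl is repeated on later maps.
 */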
146620131375Smrg drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 146722944501Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 146822944501Smrg return ret; 146922944501Smrg } 147020131375Smrg VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1)); 147122944501Smrg bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr; 147222944501Smrg } 147322944501Smrg DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, 147422944501Smrg bo_gem->mem_virtual); 147522944501Smrg bo->virtual = bo_gem->mem_virtual; 147622944501Smrg 1477424e9256Smrg memclear(set_domain); 147822944501Smrg set_domain.handle = bo_gem->gem_handle; 147922944501Smrg set_domain.read_domains = I915_GEM_DOMAIN_CPU; 148022944501Smrg if (write_enable) 148122944501Smrg set_domain.write_domain = I915_GEM_DOMAIN_CPU; 148222944501Smrg else 148322944501Smrg set_domain.write_domain = 0; 14846d98c517Smrg ret = drmIoctl(bufmgr_gem->fd, 14856d98c517Smrg DRM_IOCTL_I915_GEM_SET_DOMAIN, 14866d98c517Smrg &set_domain); 148722944501Smrg if (ret != 0) { 14889ce4edccSmrg DBG("%s:%d: Error setting to CPU domain %d: %s\n", 14899ce4edccSmrg __FILE__, __LINE__, bo_gem->gem_handle, 14909ce4edccSmrg strerror(errno)); 149122944501Smrg } 149222944501Smrg 149320131375Smrg if (write_enable) 149420131375Smrg bo_gem->mapped_cpu_write = true; 149520131375Smrg 149620131375Smrg drm_intel_gem_bo_mark_mmaps_incoherent(bo); 149720131375Smrg VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size)); 149822944501Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 149922944501Smrg 150022944501Smrg return 0; 150122944501Smrg} 150222944501Smrg 150320131375Smrgstatic int 150420131375Smrgmap_gtt(drm_intel_bo *bo) 150522944501Smrg{ 150622944501Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 150722944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 150822944501Smrg int ret; 150922944501Smrg 1510a884aba1Smrg if (bo_gem->is_userptr) 1511a884aba1Smrg return -EINVAL; 1512a884aba1Smrg 151320131375Smrg if (bo_gem->map_count++ == 0) 151420131375Smrg drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem); 151522944501Smrg 151622944501Smrg /* Get a mapping of the buffer if we haven't before. */ 151722944501Smrg if (bo_gem->gtt_virtual == NULL) { 151822944501Smrg struct drm_i915_gem_mmap_gtt mmap_arg; 151922944501Smrg 152020131375Smrg DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n", 152120131375Smrg bo_gem->gem_handle, bo_gem->name, bo_gem->map_count); 152222944501Smrg 1523424e9256Smrg memclear(mmap_arg); 152422944501Smrg mmap_arg.handle = bo_gem->gem_handle; 152522944501Smrg 152622944501Smrg /* Get the fake offset back... 
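		 * (The "offset" returned by DRM_IOCTL_I915_GEM_MMAP_GTT is not
		 * a GTT address; it is an opaque token that is only meaningful
		 * as the offset argument to a subsequent mmap() of the DRM fd.)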
*/ 15276d98c517Smrg ret = drmIoctl(bufmgr_gem->fd, 15286d98c517Smrg DRM_IOCTL_I915_GEM_MMAP_GTT, 15296d98c517Smrg &mmap_arg); 153022944501Smrg if (ret != 0) { 153122944501Smrg ret = -errno; 15329ce4edccSmrg DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n", 15339ce4edccSmrg __FILE__, __LINE__, 15349ce4edccSmrg bo_gem->gem_handle, bo_gem->name, 15359ce4edccSmrg strerror(errno)); 153620131375Smrg if (--bo_gem->map_count == 0) 153720131375Smrg drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 153822944501Smrg return ret; 153922944501Smrg } 154022944501Smrg 154122944501Smrg /* and mmap it */ 1542aec75c42Sriastradh ret = drmMap(bufmgr_gem->fd, mmap_arg.offset, bo->size, 1543aec75c42Sriastradh &bo_gem->gtt_virtual); 1544aec75c42Sriastradh if (ret) { 154522944501Smrg bo_gem->gtt_virtual = NULL; 15469ce4edccSmrg DBG("%s:%d: Error mapping buffer %d (%s): %s .\n", 15479ce4edccSmrg __FILE__, __LINE__, 15489ce4edccSmrg bo_gem->gem_handle, bo_gem->name, 15499ce4edccSmrg strerror(errno)); 155020131375Smrg if (--bo_gem->map_count == 0) 155120131375Smrg drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 155222944501Smrg return ret; 155322944501Smrg } 155422944501Smrg } 155522944501Smrg 155622944501Smrg bo->virtual = bo_gem->gtt_virtual; 155722944501Smrg 155822944501Smrg DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, 155922944501Smrg bo_gem->gtt_virtual); 156022944501Smrg 156120131375Smrg return 0; 156220131375Smrg} 156320131375Smrg 15646260e5d5Smrgdrm_public int 1565a884aba1Smrgdrm_intel_gem_bo_map_gtt(drm_intel_bo *bo) 156620131375Smrg{ 156720131375Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 156820131375Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 156920131375Smrg struct drm_i915_gem_set_domain set_domain; 157020131375Smrg int ret; 157120131375Smrg 157220131375Smrg pthread_mutex_lock(&bufmgr_gem->lock); 157320131375Smrg 157420131375Smrg ret = map_gtt(bo); 157520131375Smrg if (ret) { 157620131375Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 157720131375Smrg return ret; 157820131375Smrg } 157920131375Smrg 158020131375Smrg /* Now move it to the GTT domain so that the GPU and CPU 158120131375Smrg * caches are flushed and the GPU isn't actively using the 158220131375Smrg * buffer. 158320131375Smrg * 158420131375Smrg * The pagefault handler does this domain change for us when 158520131375Smrg * it has unbound the BO from the GTT, but it's up to us to 158620131375Smrg * tell it when we're about to use things if we had done 158720131375Smrg * rendering and it still happens to be bound to the GTT. 
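	 * Setting the domain unconditionally also means this map waits for
	 * any outstanding rendering, which is the behaviour callers of a
	 * synchronized GTT mapping expect.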
158820131375Smrg */ 1589424e9256Smrg memclear(set_domain); 159022944501Smrg set_domain.handle = bo_gem->gem_handle; 159122944501Smrg set_domain.read_domains = I915_GEM_DOMAIN_GTT; 159222944501Smrg set_domain.write_domain = I915_GEM_DOMAIN_GTT; 15936d98c517Smrg ret = drmIoctl(bufmgr_gem->fd, 15946d98c517Smrg DRM_IOCTL_I915_GEM_SET_DOMAIN, 15956d98c517Smrg &set_domain); 159622944501Smrg if (ret != 0) { 15979ce4edccSmrg DBG("%s:%d: Error setting domain %d: %s\n", 15989ce4edccSmrg __FILE__, __LINE__, bo_gem->gem_handle, 15999ce4edccSmrg strerror(errno)); 160022944501Smrg } 160122944501Smrg 160220131375Smrg drm_intel_gem_bo_mark_mmaps_incoherent(bo); 160320131375Smrg VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size)); 160422944501Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 160522944501Smrg 16066d98c517Smrg return 0; 160722944501Smrg} 160822944501Smrg 160920131375Smrg/** 161020131375Smrg * Performs a mapping of the buffer object like the normal GTT 161120131375Smrg * mapping, but avoids waiting for the GPU to be done reading from or 161220131375Smrg * rendering to the buffer. 161320131375Smrg * 161420131375Smrg * This is used in the implementation of GL_ARB_map_buffer_range: The 161520131375Smrg * user asks to create a buffer, then does a mapping, fills some 161620131375Smrg * space, runs a drawing command, then asks to map it again without 161720131375Smrg * synchronizing because it guarantees that it won't write over the 161820131375Smrg * data that the GPU is busy using (or, more specifically, that if it 161920131375Smrg * does write over the data, it acknowledges that rendering is 162020131375Smrg * undefined). 162120131375Smrg */ 162220131375Smrg 16236260e5d5Smrgdrm_public int 1624a884aba1Smrgdrm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo) 162522944501Smrg{ 162622944501Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 16272b90624aSmrg#if HAVE_VALGRIND 162820131375Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 162920131375Smrg#endif 163020131375Smrg int ret; 163122944501Smrg 163220131375Smrg /* If the CPU cache isn't coherent with the GTT, then use a 163320131375Smrg * regular synchronized mapping. The problem is that we don't 163420131375Smrg * track where the buffer was last used on the CPU side in 163520131375Smrg * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so 163620131375Smrg * we would potentially corrupt the buffer even when the user 163720131375Smrg * does reasonable things. 
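	 * On LLC platforms the CPU cache is coherent with the GPU, so no such
	 * tracking is needed and the set-domain wait can safely be skipped.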
163820131375Smrg	 */
163920131375Smrg	if (!bufmgr_gem->has_llc)
164020131375Smrg		return drm_intel_gem_bo_map_gtt(bo);
164122944501Smrg
164222944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
164320131375Smrg
164420131375Smrg	ret = map_gtt(bo);
164520131375Smrg	if (ret == 0) {
164620131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
164720131375Smrg		VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
164820131375Smrg	}
164920131375Smrg
165022944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
165122944501Smrg
165222944501Smrg	return ret;
165322944501Smrg}
165422944501Smrg
165522944501Smrgstatic int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
165622944501Smrg{
1657a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
165822944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
165920131375Smrg	int ret = 0;
166022944501Smrg
166122944501Smrg	if (bo == NULL)
166222944501Smrg		return 0;
166322944501Smrg
1664a884aba1Smrg	if (bo_gem->is_userptr)
1665a884aba1Smrg		return 0;
1666a884aba1Smrg
1667a884aba1Smrg	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1668a884aba1Smrg
166922944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
167022944501Smrg
167120131375Smrg	if (bo_gem->map_count <= 0) {
167220131375Smrg		DBG("attempted to unmap an unmapped bo\n");
167320131375Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
167420131375Smrg		/* Preserve the old behaviour of just treating this as a
167520131375Smrg		 * no-op rather than reporting the error.
167620131375Smrg		 */
167720131375Smrg		return 0;
167820131375Smrg	}
167920131375Smrg
168020131375Smrg	if (bo_gem->mapped_cpu_write) {
168120131375Smrg		struct drm_i915_gem_sw_finish sw_finish;
168220131375Smrg
168320131375Smrg		/* Cause a flush to happen if the buffer's pinned for
168420131375Smrg		 * scanout, so the results show up in a timely manner.
168520131375Smrg		 * Unlike GTT set domains, this only does work if the
168620131375Smrg		 * buffer should be scanout-related.
168720131375Smrg		 */
1688424e9256Smrg		memclear(sw_finish);
168920131375Smrg		sw_finish.handle = bo_gem->gem_handle;
169020131375Smrg		ret = drmIoctl(bufmgr_gem->fd,
169120131375Smrg			       DRM_IOCTL_I915_GEM_SW_FINISH,
169220131375Smrg			       &sw_finish);
169320131375Smrg		ret = ret == -1 ? -errno : 0;
169420131375Smrg
169520131375Smrg		bo_gem->mapped_cpu_write = false;
169620131375Smrg	}
169722944501Smrg
169820131375Smrg	/* We need to unmap after every invocation as we cannot track
16992ee35494Smrg	 * an open vma for every bo as that will exhaust the system
170020131375Smrg	 * limits and cause later failures.
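	 * (Each mapping consumes a vma, and the kernel bounds the number of
	 * vmas a process may hold, e.g. via the vm.max_map_count sysctl.)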
170120131375Smrg */ 170220131375Smrg if (--bo_gem->map_count == 0) { 170320131375Smrg drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 170420131375Smrg drm_intel_gem_bo_mark_mmaps_incoherent(bo); 170520131375Smrg bo->virtual = NULL; 170620131375Smrg } 170722944501Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 170822944501Smrg 170922944501Smrg return ret; 171022944501Smrg} 171122944501Smrg 17126260e5d5Smrgdrm_public int 1713a884aba1Smrgdrm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo) 171420131375Smrg{ 171520131375Smrg return drm_intel_gem_bo_unmap(bo); 171620131375Smrg} 171720131375Smrg 171822944501Smrgstatic int 171922944501Smrgdrm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset, 172022944501Smrg unsigned long size, const void *data) 172122944501Smrg{ 172222944501Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 172322944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 172422944501Smrg struct drm_i915_gem_pwrite pwrite; 172522944501Smrg int ret; 172622944501Smrg 1727a884aba1Smrg if (bo_gem->is_userptr) 1728a884aba1Smrg return -EINVAL; 1729a884aba1Smrg 1730424e9256Smrg memclear(pwrite); 173122944501Smrg pwrite.handle = bo_gem->gem_handle; 173222944501Smrg pwrite.offset = offset; 173322944501Smrg pwrite.size = size; 173422944501Smrg pwrite.data_ptr = (uint64_t) (uintptr_t) data; 17356d98c517Smrg ret = drmIoctl(bufmgr_gem->fd, 17366d98c517Smrg DRM_IOCTL_I915_GEM_PWRITE, 17376d98c517Smrg &pwrite); 173822944501Smrg if (ret != 0) { 173922944501Smrg ret = -errno; 17409ce4edccSmrg DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n", 17419ce4edccSmrg __FILE__, __LINE__, bo_gem->gem_handle, (int)offset, 17429ce4edccSmrg (int)size, strerror(errno)); 174322944501Smrg } 174422944501Smrg 174522944501Smrg return ret; 174622944501Smrg} 174722944501Smrg 174822944501Smrgstatic int 174922944501Smrgdrm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id) 175022944501Smrg{ 175122944501Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; 175222944501Smrg struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id; 175322944501Smrg int ret; 175422944501Smrg 1755424e9256Smrg memclear(get_pipe_from_crtc_id); 175622944501Smrg get_pipe_from_crtc_id.crtc_id = crtc_id; 17576d98c517Smrg ret = drmIoctl(bufmgr_gem->fd, 17586d98c517Smrg DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, 17596d98c517Smrg &get_pipe_from_crtc_id); 176022944501Smrg if (ret != 0) { 176122944501Smrg /* We return -1 here to signal that we don't 176222944501Smrg * know which pipe is associated with this crtc. 
176322944501Smrg	 * This lets the caller know that this information
176422944501Smrg	 * isn't available; using the wrong pipe for
176522944501Smrg	 * vblank waiting can cause the chipset to lock up.
176622944501Smrg	 */
176722944501Smrg		return -1;
176822944501Smrg	}
176922944501Smrg
177022944501Smrg	return get_pipe_from_crtc_id.pipe;
177122944501Smrg}
177222944501Smrg
177322944501Smrgstatic int
177422944501Smrgdrm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
177522944501Smrg			     unsigned long size, void *data)
177622944501Smrg{
177722944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
177822944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
177922944501Smrg	struct drm_i915_gem_pread pread;
178022944501Smrg	int ret;
178122944501Smrg
1782a884aba1Smrg	if (bo_gem->is_userptr)
1783a884aba1Smrg		return -EINVAL;
1784a884aba1Smrg
1785424e9256Smrg	memclear(pread);
178622944501Smrg	pread.handle = bo_gem->gem_handle;
178722944501Smrg	pread.offset = offset;
178822944501Smrg	pread.size = size;
178922944501Smrg	pread.data_ptr = (uint64_t) (uintptr_t) data;
17906d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
17916d98c517Smrg		       DRM_IOCTL_I915_GEM_PREAD,
17926d98c517Smrg		       &pread);
179322944501Smrg	if (ret != 0) {
179422944501Smrg		ret = -errno;
17959ce4edccSmrg		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
17969ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
17979ce4edccSmrg		    (int)size, strerror(errno));
179822944501Smrg	}
179922944501Smrg
180022944501Smrg	return ret;
180122944501Smrg}
180222944501Smrg
18039ce4edccSmrg/** Waits for all GPU rendering with the object to have completed. */
180422944501Smrgstatic void
180522944501Smrgdrm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
180622944501Smrg{
18079ce4edccSmrg	drm_intel_gem_bo_start_gtt_access(bo, 1);
180822944501Smrg}
180922944501Smrg
181020131375Smrg/**
181120131375Smrg * Waits on a BO for the given amount of time.
181220131375Smrg *
181320131375Smrg * @bo: buffer object to wait for
181420131375Smrg * @timeout_ns: amount of time to wait in nanoseconds.
181520131375Smrg *   If value is less than 0, an infinite wait will occur.
181620131375Smrg *
181720131375Smrg * Returns 0 if the wait was successful, i.e. the last batch referencing the
181820131375Smrg * object has completed within the allotted time. Otherwise some negative return
181920131375Smrg * value describes the error. Of particular interest is -ETIME when the wait has
182020131375Smrg * failed to yield the desired result.
182120131375Smrg *
182220131375Smrg * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
182320131375Smrg * the operation to give up after a certain amount of time. Another subtle
182420131375Smrg * difference is the internal locking semantics are different (this variant does
182520131375Smrg * not hold the lock for the duration of the wait). This makes the wait subject
182620131375Smrg * to a larger userspace race window.
182720131375Smrg *
182820131375Smrg * The implementation shall wait until the object is no longer actively
182920131375Smrg * referenced within a batch buffer at the time of the call. The wait does
183020131375Smrg * not guard against the buffer being re-issued via another thread, or a
183120131375Smrg * flinked handle. Userspace must make sure this race does not occur if such
183220131375Smrg * precision is important.
1833424e9256Smrg *
1834424e9256Smrg * Note that some kernels have broken the promise of an infinite wait for
1835424e9256Smrg * negative values; upgrade to the latest stable kernel if this is the case.
183620131375Smrg */
18376260e5d5Smrgdrm_public int
1838a884aba1Smrgdrm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
183920131375Smrg{
184020131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
184120131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
184220131375Smrg	struct drm_i915_gem_wait wait;
184320131375Smrg	int ret;
184420131375Smrg
184520131375Smrg	if (!bufmgr_gem->has_wait_timeout) {
184620131375Smrg		DBG("%s:%d: Timed wait is not supported. Falling back to "
184720131375Smrg		    "infinite wait\n", __FILE__, __LINE__);
184820131375Smrg		if (timeout_ns) {
184920131375Smrg			drm_intel_gem_bo_wait_rendering(bo);
185020131375Smrg			return 0;
185120131375Smrg		} else {
185220131375Smrg			return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
185320131375Smrg		}
185420131375Smrg	}
185520131375Smrg
1856424e9256Smrg	memclear(wait);
185720131375Smrg	wait.bo_handle = bo_gem->gem_handle;
185820131375Smrg	wait.timeout_ns = timeout_ns;
185920131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
186020131375Smrg	if (ret == -1)
186120131375Smrg		return -errno;
186220131375Smrg
186320131375Smrg	return ret;
186420131375Smrg}
186520131375Smrg
186622944501Smrg/**
186722944501Smrg * Sets the object to the GTT read and possibly write domain, used by the X
186822944501Smrg * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
186922944501Smrg *
187022944501Smrg * In combination with drm_intel_gem_bo_pin() and manual fence management, we
187122944501Smrg * can do tiled pixmaps this way.
187222944501Smrg */
18736260e5d5Smrgdrm_public void
187422944501Smrgdrm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
187522944501Smrg{
187622944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
187722944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
187822944501Smrg	struct drm_i915_gem_set_domain set_domain;
187922944501Smrg	int ret;
188022944501Smrg
1881424e9256Smrg	memclear(set_domain);
188222944501Smrg	set_domain.handle = bo_gem->gem_handle;
188322944501Smrg	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
188422944501Smrg	set_domain.write_domain = write_enable ?
I915_GEM_DOMAIN_GTT : 0; 18856d98c517Smrg ret = drmIoctl(bufmgr_gem->fd, 18866d98c517Smrg DRM_IOCTL_I915_GEM_SET_DOMAIN, 18876d98c517Smrg &set_domain); 188822944501Smrg if (ret != 0) { 18899ce4edccSmrg DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", 18909ce4edccSmrg __FILE__, __LINE__, bo_gem->gem_handle, 18919ce4edccSmrg set_domain.read_domains, set_domain.write_domain, 18929ce4edccSmrg strerror(errno)); 189322944501Smrg } 189422944501Smrg} 189522944501Smrg 189622944501Smrgstatic void 189722944501Smrgdrm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr) 189822944501Smrg{ 189922944501Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; 1900424e9256Smrg struct drm_gem_close close_bo; 1901424e9256Smrg int i, ret; 190222944501Smrg 190322944501Smrg free(bufmgr_gem->exec2_objects); 190422944501Smrg free(bufmgr_gem->exec_objects); 190522944501Smrg free(bufmgr_gem->exec_bos); 190622944501Smrg 190722944501Smrg pthread_mutex_destroy(&bufmgr_gem->lock); 190822944501Smrg 190922944501Smrg /* Free any cached buffer objects we were going to reuse */ 1910aaba2545Smrg for (i = 0; i < bufmgr_gem->num_buckets; i++) { 191122944501Smrg struct drm_intel_gem_bo_bucket *bucket = 191222944501Smrg &bufmgr_gem->cache_bucket[i]; 191322944501Smrg drm_intel_bo_gem *bo_gem; 191422944501Smrg 191522944501Smrg while (!DRMLISTEMPTY(&bucket->head)) { 191622944501Smrg bo_gem = DRMLISTENTRY(drm_intel_bo_gem, 191722944501Smrg bucket->head.next, head); 191822944501Smrg DRMLISTDEL(&bo_gem->head); 191922944501Smrg 192022944501Smrg drm_intel_gem_bo_free(&bo_gem->bo); 192122944501Smrg } 192222944501Smrg } 192322944501Smrg 1924424e9256Smrg /* Release userptr bo kept hanging around for optimisation. */ 1925424e9256Smrg if (bufmgr_gem->userptr_active.ptr) { 1926424e9256Smrg memclear(close_bo); 1927424e9256Smrg close_bo.handle = bufmgr_gem->userptr_active.handle; 1928424e9256Smrg ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo); 1929424e9256Smrg free(bufmgr_gem->userptr_active.ptr); 1930424e9256Smrg if (ret) 1931424e9256Smrg fprintf(stderr, 1932424e9256Smrg "Failed to release test userptr object! (%d) " 1933424e9256Smrg "i915 kernel driver may not be sane!\n", errno); 1934424e9256Smrg } 1935424e9256Smrg 193622944501Smrg free(bufmgr); 193722944501Smrg} 193822944501Smrg 193922944501Smrg/** 194022944501Smrg * Adds the target buffer to the validation list and adds the relocation 194122944501Smrg * to the reloc_buffer's relocation list. 194222944501Smrg * 194322944501Smrg * The relocation entry at the given offset must already contain the 194422944501Smrg * precomputed relocation value, because the kernel will optimize out 194522944501Smrg * the relocation entry write when the buffer hasn't moved from the 194622944501Smrg * last known offset in target_bo. 
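 * In other words, the presumed_offset recorded below must match the
 * address the caller wrote into the batch; the kernel only rewrites
 * the dword when the target has moved away from that presumed offset.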
194722944501Smrg */ 194822944501Smrgstatic int 194922944501Smrgdo_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, 195022944501Smrg drm_intel_bo *target_bo, uint32_t target_offset, 195122944501Smrg uint32_t read_domains, uint32_t write_domain, 195220131375Smrg bool need_fence) 195322944501Smrg{ 195422944501Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 195522944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 195622944501Smrg drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; 195720131375Smrg bool fenced_command; 195822944501Smrg 195922944501Smrg if (bo_gem->has_error) 196022944501Smrg return -ENOMEM; 196122944501Smrg 196222944501Smrg if (target_bo_gem->has_error) { 196320131375Smrg bo_gem->has_error = true; 196422944501Smrg return -ENOMEM; 196522944501Smrg } 196622944501Smrg 196722944501Smrg /* We never use HW fences for rendering on 965+ */ 196822944501Smrg if (bufmgr_gem->gen >= 4) 196920131375Smrg need_fence = false; 197022944501Smrg 19719ce4edccSmrg fenced_command = need_fence; 19729ce4edccSmrg if (target_bo_gem->tiling_mode == I915_TILING_NONE) 197320131375Smrg need_fence = false; 19749ce4edccSmrg 197522944501Smrg /* Create a new relocation list if needed */ 197622944501Smrg if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo)) 197722944501Smrg return -ENOMEM; 197822944501Smrg 197922944501Smrg /* Check overflow */ 198022944501Smrg assert(bo_gem->reloc_count < bufmgr_gem->max_relocs); 198122944501Smrg 198222944501Smrg /* Check args */ 198322944501Smrg assert(offset <= bo->size - 4); 198422944501Smrg assert((write_domain & (write_domain - 1)) == 0); 198522944501Smrg 19863c748557Ssnj /* An object needing a fence is a tiled buffer, so it won't have 19873c748557Ssnj * relocs to other buffers. 19883c748557Ssnj */ 19893c748557Ssnj if (need_fence) { 19903c748557Ssnj assert(target_bo_gem->reloc_count == 0); 19913c748557Ssnj target_bo_gem->reloc_tree_fences = 1; 19923c748557Ssnj } 19933c748557Ssnj 199422944501Smrg /* Make sure that we're not adding a reloc to something whose size has 199522944501Smrg * already been accounted for. 
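	 * (If this BO were already some other BO's reloc target, its
	 * reloc_tree_size would have been folded into that BO's accounting,
	 * and growing the tree now would silently invalidate it.)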
199622944501Smrg	 */
199722944501Smrg	assert(!bo_gem->used_as_reloc_target);
1998aaba2545Smrg	if (target_bo_gem != bo_gem) {
199920131375Smrg		target_bo_gem->used_as_reloc_target = true;
2000aaba2545Smrg		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
20013c748557Ssnj		bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
2002aaba2545Smrg	}
200322944501Smrg
200422944501Smrg	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
2005aaba2545Smrg	if (target_bo != bo)
2006aaba2545Smrg		drm_intel_gem_bo_reference(target_bo);
20079ce4edccSmrg	if (fenced_command)
200822944501Smrg		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
200922944501Smrg			DRM_INTEL_RELOC_FENCE;
201022944501Smrg	else
201122944501Smrg		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
201222944501Smrg
2013fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
2014fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
2015fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].target_handle =
2016fe517fc9Smrg		target_bo_gem->gem_handle;
2017fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
2018fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
2019fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
202022944501Smrg	bo_gem->reloc_count++;
202122944501Smrg
202222944501Smrg	return 0;
202322944501Smrg}
202422944501Smrg
2025fe517fc9Smrgstatic void
2026fe517fc9Smrgdrm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
2027fe517fc9Smrg{
2028fe517fc9Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
20290655efefSmrg
20300655efefSmrg	if (enable)
20310655efefSmrg		bo_gem->kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
20320655efefSmrg	else
20330655efefSmrg		bo_gem->kflags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
2034fe517fc9Smrg}
2035fe517fc9Smrg
2036fe517fc9Smrgstatic int
2037fe517fc9Smrgdrm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
2038fe517fc9Smrg{
2039fe517fc9Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2040fe517fc9Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2041fe517fc9Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2042fe517fc9Smrg	if (bo_gem->has_error)
2043fe517fc9Smrg		return -ENOMEM;
2044fe517fc9Smrg
2045fe517fc9Smrg	if (target_bo_gem->has_error) {
2046fe517fc9Smrg		bo_gem->has_error = true;
2047fe517fc9Smrg		return -ENOMEM;
2048fe517fc9Smrg	}
2049fe517fc9Smrg
20500655efefSmrg	if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
2051fe517fc9Smrg		return -EINVAL;
2052fe517fc9Smrg	if (target_bo_gem == bo_gem)
2053fe517fc9Smrg		return -EINVAL;
2054fe517fc9Smrg
2055fe517fc9Smrg	if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
2056fe517fc9Smrg		int new_size = bo_gem->softpin_target_size * 2;
2057fe517fc9Smrg		drm_intel_bo **new_targets; /* keep the old list if realloc fails */
2058fe517fc9Smrg		if (new_size == 0)
2059fe517fc9Smrg			new_size = bufmgr_gem->max_relocs;
2060fe517fc9Smrg		new_targets = realloc(bo_gem->softpin_target, new_size *
2061fe517fc9Smrg				sizeof(drm_intel_bo *));
2062fe517fc9Smrg		if (!new_targets)
2063fe517fc9Smrg			return -ENOMEM;
2064fe517fc9Smrg		bo_gem->softpin_target = new_targets;
2065fe517fc9Smrg		bo_gem->softpin_target_size = new_size;
2066fe517fc9Smrg	}
2067fe517fc9Smrg	bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
2068fe517fc9Smrg	drm_intel_gem_bo_reference(target_bo);
2069fe517fc9Smrg	bo_gem->softpin_target_count++;
2070fe517fc9Smrg
2071fe517fc9Smrg	return 0;
2072fe517fc9Smrg}
2073fe517fc9Smrg
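/*
 * Illustrative sketch only (not part of the libdrm API): how a caller
 * typically pairs the two steps described above -- write the presumed
 * address into the batch, then record the relocation so the kernel can
 * patch the dword if the target object moves.  The function name and
 * parameters here are hypothetical, and the 32-bit store assumes a
 * pre-gen8 style batch.
 */
static int
emit_reloc_example(drm_intel_bo *batch, uint32_t *batch_map, uint32_t dword,
		   drm_intel_bo *target, uint32_t delta)
{
	/* Step 1: write the address we presume the target lives at. */
	batch_map[dword] = (uint32_t)(target->offset64 + delta);

	/* Step 2: record the relocation; offsets are in bytes, not dwords. */
	return drm_intel_bo_emit_reloc(batch, dword * 4, target, delta,
				       I915_GEM_DOMAIN_RENDER,
				       I915_GEM_DOMAIN_RENDER);
}
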
207422944501Smrgstatic int
207522944501Smrgdrm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
207622944501Smrg			    drm_intel_bo *target_bo, uint32_t target_offset,
207722944501Smrg			    uint32_t read_domains, uint32_t write_domain)
207822944501Smrg{
207922944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2080fe517fc9Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
208122944501Smrg
20820655efefSmrg	if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
2083fe517fc9Smrg		return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
2084fe517fc9Smrg	else
2085fe517fc9Smrg		return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2086fe517fc9Smrg					read_domains, write_domain,
2087fe517fc9Smrg					!bufmgr_gem->fenced_relocs);
208822944501Smrg}
208922944501Smrg
209022944501Smrgstatic int
209122944501Smrgdrm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
209222944501Smrg				  drm_intel_bo *target_bo,
209322944501Smrg				  uint32_t target_offset,
209422944501Smrg				  uint32_t read_domains, uint32_t write_domain)
209522944501Smrg{
209622944501Smrg	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
209720131375Smrg				read_domains, write_domain, true);
209820131375Smrg}
209920131375Smrg
21006260e5d5Smrgdrm_public int
210120131375Smrgdrm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
210220131375Smrg{
210320131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
210420131375Smrg
210520131375Smrg	return bo_gem->reloc_count;
210620131375Smrg}
210720131375Smrg
210820131375Smrg/**
210920131375Smrg * Removes existing relocation entries in the BO after "start".
211020131375Smrg *
211120131375Smrg * This allows a user to avoid a two-step process for state setup with
211220131375Smrg * counting up all the buffer objects and doing a
211320131375Smrg * drm_intel_bufmgr_check_aperture_space() before emitting any of the
211420131375Smrg * relocations for the state setup. Instead, save the state of the
211520131375Smrg * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
211620131375Smrg * state, and then check if it still fits in the aperture.
211720131375Smrg *
211820131375Smrg * Any further drm_intel_bufmgr_check_aperture_space() queries
211920131375Smrg * involving this buffer in the tree are undefined after this call.
2120fe517fc9Smrg *
2121fe517fc9Smrg * This also removes all softpinned targets being referenced by the BO.
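 *
 * A typical (illustrative) pattern, with hypothetical bos/count state:
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch);
 *	... speculatively emit state and relocations ...
 *	if (drm_intel_bufmgr_check_aperture_space(bos, count) != 0)
 *		drm_intel_gem_bo_clear_relocs(batch, saved);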
212220131375Smrg */ 21236260e5d5Smrgdrm_public void 212420131375Smrgdrm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start) 212520131375Smrg{ 2126a884aba1Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 212720131375Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 212820131375Smrg int i; 212920131375Smrg struct timespec time; 213020131375Smrg 213120131375Smrg clock_gettime(CLOCK_MONOTONIC, &time); 213220131375Smrg 213320131375Smrg assert(bo_gem->reloc_count >= start); 2134a884aba1Smrg 213520131375Smrg /* Unreference the cleared target buffers */ 2136a884aba1Smrg pthread_mutex_lock(&bufmgr_gem->lock); 2137a884aba1Smrg 213820131375Smrg for (i = start; i < bo_gem->reloc_count; i++) { 213920131375Smrg drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo; 214020131375Smrg if (&target_bo_gem->bo != bo) { 214120131375Smrg bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences; 214220131375Smrg drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, 214320131375Smrg time.tv_sec); 214420131375Smrg } 214520131375Smrg } 214620131375Smrg bo_gem->reloc_count = start; 2147a884aba1Smrg 2148fe517fc9Smrg for (i = 0; i < bo_gem->softpin_target_count; i++) { 2149fe517fc9Smrg drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i]; 2150fe517fc9Smrg drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec); 2151fe517fc9Smrg } 2152fe517fc9Smrg bo_gem->softpin_target_count = 0; 2153fe517fc9Smrg 2154a884aba1Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 2155a884aba1Smrg 215622944501Smrg} 215722944501Smrg 215822944501Smrg/** 215922944501Smrg * Walk the tree of relocations rooted at BO and accumulate the list of 216022944501Smrg * validations to be performed and update the relocation buffers with 216122944501Smrg * index values into the validation list. 216222944501Smrg */ 216322944501Smrgstatic void 216422944501Smrgdrm_intel_gem_bo_process_reloc(drm_intel_bo *bo) 216522944501Smrg{ 216622944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 216722944501Smrg int i; 216822944501Smrg 216922944501Smrg if (bo_gem->relocs == NULL) 217022944501Smrg return; 217122944501Smrg 217222944501Smrg for (i = 0; i < bo_gem->reloc_count; i++) { 217322944501Smrg drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo; 217422944501Smrg 2175aaba2545Smrg if (target_bo == bo) 2176aaba2545Smrg continue; 2177aaba2545Smrg 217820131375Smrg drm_intel_gem_bo_mark_mmaps_incoherent(bo); 217920131375Smrg 218022944501Smrg /* Continue walking the tree depth-first. 
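		 * Recursion adds each dependency to the validate list before
		 * the buffer that references it.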
*/
218122944501Smrg		drm_intel_gem_bo_process_reloc(target_bo);
218222944501Smrg
218322944501Smrg		/* Add the target to the validate list */
218422944501Smrg		drm_intel_add_validate_buffer(target_bo);
218522944501Smrg	}
218622944501Smrg}
218722944501Smrg
218822944501Smrgstatic void
218922944501Smrgdrm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
219022944501Smrg{
219122944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
219222944501Smrg	int i;
219322944501Smrg
2194fe517fc9Smrg	if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
219522944501Smrg		return;
219622944501Smrg
219722944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++) {
219822944501Smrg		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
219922944501Smrg		int need_fence;
220022944501Smrg
2201aaba2545Smrg		if (target_bo == bo)
2202aaba2545Smrg			continue;
2203aaba2545Smrg
220420131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
220520131375Smrg
220622944501Smrg		/* Continue walking the tree depth-first. */
220722944501Smrg		drm_intel_gem_bo_process_reloc2(target_bo);
220822944501Smrg
220922944501Smrg		need_fence = (bo_gem->reloc_target_info[i].flags &
221022944501Smrg			      DRM_INTEL_RELOC_FENCE);
221122944501Smrg
221222944501Smrg		/* Add the target to the validate list */
221322944501Smrg		drm_intel_add_validate_buffer2(target_bo, need_fence);
221422944501Smrg	}
2215fe517fc9Smrg
2216fe517fc9Smrg	for (i = 0; i < bo_gem->softpin_target_count; i++) {
2217fe517fc9Smrg		drm_intel_bo *target_bo = bo_gem->softpin_target[i];
2218fe517fc9Smrg
2219fe517fc9Smrg		if (target_bo == bo)
2220fe517fc9Smrg			continue;
2221fe517fc9Smrg
2222fe517fc9Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2223fe517fc9Smrg		drm_intel_gem_bo_process_reloc2(target_bo);
2224fe517fc9Smrg		drm_intel_add_validate_buffer2(target_bo, false);
2225fe517fc9Smrg	}
222622944501Smrg}
222722944501Smrg
222822944501Smrg
222922944501Smrgstatic void
223022944501Smrgdrm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
223122944501Smrg{
223222944501Smrg	int i;
223322944501Smrg
223422944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
223522944501Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
223622944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
223722944501Smrg
223822944501Smrg		/* Update the buffer offset */
223920131375Smrg		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
2240fe517fc9Smrg			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2241d82d45b3Sjoerg			    bo_gem->gem_handle, bo_gem->name,
2242fe517fc9Smrg			    upper_32_bits(bo->offset64),
2243fe517fc9Smrg			    lower_32_bits(bo->offset64),
2244fe517fc9Smrg			    upper_32_bits(bufmgr_gem->exec_objects[i].offset),
2245fe517fc9Smrg			    lower_32_bits(bufmgr_gem->exec_objects[i].offset));
224620131375Smrg			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
224722944501Smrg			bo->offset = bufmgr_gem->exec_objects[i].offset;
224822944501Smrg		}
224922944501Smrg	}
225022944501Smrg}
225122944501Smrg
225222944501Smrgstatic void
225322944501Smrgdrm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
225422944501Smrg{
225522944501Smrg	int i;
225622944501Smrg
225722944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
225822944501Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
225922944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
226022944501Smrg
226122944501Smrg		/* Update the buffer offset */
226220131375Smrg		if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2263fe517fc9Smrg			/* If we're seeing a softpinned object here it means that the kernel
2264fe517fc9Smrg			 * has
relocated our object... Indicating a programming error 2265fe517fc9Smrg */ 22660655efefSmrg assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED)); 2267fe517fc9Smrg DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n", 2268d82d45b3Sjoerg bo_gem->gem_handle, bo_gem->name, 2269fe517fc9Smrg upper_32_bits(bo->offset64), 2270fe517fc9Smrg lower_32_bits(bo->offset64), 2271fe517fc9Smrg upper_32_bits(bufmgr_gem->exec2_objects[i].offset), 2272fe517fc9Smrg lower_32_bits(bufmgr_gem->exec2_objects[i].offset)); 227320131375Smrg bo->offset64 = bufmgr_gem->exec2_objects[i].offset; 227422944501Smrg bo->offset = bufmgr_gem->exec2_objects[i].offset; 227522944501Smrg } 227622944501Smrg } 227722944501Smrg} 227822944501Smrg 22796260e5d5Smrgdrm_public void 228020131375Smrgdrm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo, 228120131375Smrg int x1, int y1, int width, int height, 228220131375Smrg enum aub_dump_bmp_format format, 228320131375Smrg int pitch, int offset) 228420131375Smrg{ 228520131375Smrg} 228620131375Smrg 228720131375Smrgstatic int 228820131375Smrgdrm_intel_gem_bo_exec(drm_intel_bo *bo, int used, 228920131375Smrg drm_clip_rect_t * cliprects, int num_cliprects, int DR4) 229020131375Smrg{ 229120131375Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 229220131375Smrg struct drm_i915_gem_execbuffer execbuf; 229320131375Smrg int ret, i; 229420131375Smrg 2295fe517fc9Smrg if (to_bo_gem(bo)->has_error) 229620131375Smrg return -ENOMEM; 229720131375Smrg 229820131375Smrg pthread_mutex_lock(&bufmgr_gem->lock); 229920131375Smrg /* Update indices and set up the validate list. */ 230020131375Smrg drm_intel_gem_bo_process_reloc(bo); 230120131375Smrg 230220131375Smrg /* Add the batch buffer to the validation list. There are no 230320131375Smrg * relocations pointing to it. 230420131375Smrg */ 230520131375Smrg drm_intel_add_validate_buffer(bo); 230620131375Smrg 2307424e9256Smrg memclear(execbuf); 230820131375Smrg execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects; 230920131375Smrg execbuf.buffer_count = bufmgr_gem->exec_count; 231020131375Smrg execbuf.batch_start_offset = 0; 231120131375Smrg execbuf.batch_len = used; 231220131375Smrg execbuf.cliprects_ptr = (uintptr_t) cliprects; 231320131375Smrg execbuf.num_cliprects = num_cliprects; 231420131375Smrg execbuf.DR1 = 0; 231520131375Smrg execbuf.DR4 = DR4; 231620131375Smrg 231720131375Smrg ret = drmIoctl(bufmgr_gem->fd, 231820131375Smrg DRM_IOCTL_I915_GEM_EXECBUFFER, 231920131375Smrg &execbuf); 232020131375Smrg if (ret != 0) { 232120131375Smrg ret = -errno; 232220131375Smrg if (errno == ENOSPC) { 232320131375Smrg DBG("Execbuffer fails to pin. " 232420131375Smrg "Estimate: %u. Actual: %u. 
Available: %u\n", 232520131375Smrg drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos, 232620131375Smrg bufmgr_gem-> 232720131375Smrg exec_count), 232820131375Smrg drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos, 232920131375Smrg bufmgr_gem-> 233020131375Smrg exec_count), 233120131375Smrg (unsigned int)bufmgr_gem->gtt_size); 233220131375Smrg } 233320131375Smrg } 233420131375Smrg drm_intel_update_buffer_offsets(bufmgr_gem); 233520131375Smrg 233620131375Smrg if (bufmgr_gem->bufmgr.debug) 233720131375Smrg drm_intel_gem_dump_validation_list(bufmgr_gem); 233820131375Smrg 233920131375Smrg for (i = 0; i < bufmgr_gem->exec_count; i++) { 2340fe517fc9Smrg drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]); 234120131375Smrg 234220131375Smrg bo_gem->idle = false; 234320131375Smrg 234420131375Smrg /* Disconnect the buffer from the validate list */ 234520131375Smrg bo_gem->validate_index = -1; 234620131375Smrg bufmgr_gem->exec_bos[i] = NULL; 234720131375Smrg } 234820131375Smrg bufmgr_gem->exec_count = 0; 234920131375Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 235020131375Smrg 235120131375Smrg return ret; 235220131375Smrg} 235320131375Smrg 235420131375Smrgstatic int 235520131375Smrgdo_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx, 235620131375Smrg drm_clip_rect_t *cliprects, int num_cliprects, int DR4, 23572ee35494Smrg int in_fence, int *out_fence, 235820131375Smrg unsigned int flags) 235920131375Smrg{ 236020131375Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; 236120131375Smrg struct drm_i915_gem_execbuffer2 execbuf; 236220131375Smrg int ret = 0; 236320131375Smrg int i; 236420131375Smrg 2365fe517fc9Smrg if (to_bo_gem(bo)->has_error) 2366fe517fc9Smrg return -ENOMEM; 2367fe517fc9Smrg 236820131375Smrg switch (flags & 0x7) { 236920131375Smrg default: 237020131375Smrg return -EINVAL; 237120131375Smrg case I915_EXEC_BLT: 23729ce4edccSmrg if (!bufmgr_gem->has_blt) 23739ce4edccSmrg return -EINVAL; 23749ce4edccSmrg break; 23759ce4edccSmrg case I915_EXEC_BSD: 23769ce4edccSmrg if (!bufmgr_gem->has_bsd) 23779ce4edccSmrg return -EINVAL; 23789ce4edccSmrg break; 237920131375Smrg case I915_EXEC_VEBOX: 238020131375Smrg if (!bufmgr_gem->has_vebox) 238120131375Smrg return -EINVAL; 238220131375Smrg break; 23839ce4edccSmrg case I915_EXEC_RENDER: 23849ce4edccSmrg case I915_EXEC_DEFAULT: 23859ce4edccSmrg break; 23869ce4edccSmrg } 2387aaba2545Smrg 238822944501Smrg pthread_mutex_lock(&bufmgr_gem->lock); 238922944501Smrg /* Update indices and set up the validate list. */ 239022944501Smrg drm_intel_gem_bo_process_reloc2(bo); 239122944501Smrg 239222944501Smrg /* Add the batch buffer to the validation list. There are no relocations 239322944501Smrg * pointing to it. 
239422944501Smrg */ 239522944501Smrg drm_intel_add_validate_buffer2(bo, 0); 239622944501Smrg 2397424e9256Smrg memclear(execbuf); 239822944501Smrg execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects; 239922944501Smrg execbuf.buffer_count = bufmgr_gem->exec_count; 240022944501Smrg execbuf.batch_start_offset = 0; 240122944501Smrg execbuf.batch_len = used; 240222944501Smrg execbuf.cliprects_ptr = (uintptr_t)cliprects; 240322944501Smrg execbuf.num_cliprects = num_cliprects; 240422944501Smrg execbuf.DR1 = 0; 240522944501Smrg execbuf.DR4 = DR4; 240620131375Smrg execbuf.flags = flags; 240720131375Smrg if (ctx == NULL) 240820131375Smrg i915_execbuffer2_set_context_id(execbuf, 0); 240920131375Smrg else 241020131375Smrg i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id); 241122944501Smrg execbuf.rsvd2 = 0; 24122ee35494Smrg if (in_fence != -1) { 24132ee35494Smrg execbuf.rsvd2 = in_fence; 24142ee35494Smrg execbuf.flags |= I915_EXEC_FENCE_IN; 24152ee35494Smrg } 24162ee35494Smrg if (out_fence != NULL) { 24172ee35494Smrg *out_fence = -1; 24182ee35494Smrg execbuf.flags |= I915_EXEC_FENCE_OUT; 24192ee35494Smrg } 242022944501Smrg 242120131375Smrg if (bufmgr_gem->no_exec) 242220131375Smrg goto skip_execution; 242320131375Smrg 24246d98c517Smrg ret = drmIoctl(bufmgr_gem->fd, 24252ee35494Smrg DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, 24266d98c517Smrg &execbuf); 242722944501Smrg if (ret != 0) { 242822944501Smrg ret = -errno; 24296d98c517Smrg if (ret == -ENOSPC) { 24309ce4edccSmrg DBG("Execbuffer fails to pin. " 24319ce4edccSmrg "Estimate: %u. Actual: %u. Available: %u\n", 24329ce4edccSmrg drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos, 24339ce4edccSmrg bufmgr_gem->exec_count), 24349ce4edccSmrg drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos, 24359ce4edccSmrg bufmgr_gem->exec_count), 24369ce4edccSmrg (unsigned int) bufmgr_gem->gtt_size); 243722944501Smrg } 243822944501Smrg } 243922944501Smrg drm_intel_update_buffer_offsets2(bufmgr_gem); 244022944501Smrg 24412ee35494Smrg if (ret == 0 && out_fence != NULL) 24422ee35494Smrg *out_fence = execbuf.rsvd2 >> 32; 24432ee35494Smrg 244420131375Smrgskip_execution: 244522944501Smrg if (bufmgr_gem->bufmgr.debug) 244622944501Smrg drm_intel_gem_dump_validation_list(bufmgr_gem); 244722944501Smrg 244822944501Smrg for (i = 0; i < bufmgr_gem->exec_count; i++) { 2449fe517fc9Smrg drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]); 245022944501Smrg 245120131375Smrg bo_gem->idle = false; 245220131375Smrg 245322944501Smrg /* Disconnect the buffer from the validate list */ 245422944501Smrg bo_gem->validate_index = -1; 245522944501Smrg bufmgr_gem->exec_bos[i] = NULL; 245622944501Smrg } 245722944501Smrg bufmgr_gem->exec_count = 0; 245822944501Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 245922944501Smrg 246022944501Smrg return ret; 246122944501Smrg} 246222944501Smrg 2463aaba2545Smrgstatic int 2464aaba2545Smrgdrm_intel_gem_bo_exec2(drm_intel_bo *bo, int used, 2465aaba2545Smrg drm_clip_rect_t *cliprects, int num_cliprects, 2466aaba2545Smrg int DR4) 2467aaba2545Smrg{ 246820131375Smrg return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4, 24692ee35494Smrg -1, NULL, I915_EXEC_RENDER); 247020131375Smrg} 247120131375Smrg 247220131375Smrgstatic int 247320131375Smrgdrm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used, 247420131375Smrg drm_clip_rect_t *cliprects, int num_cliprects, int DR4, 247520131375Smrg unsigned int flags) 247620131375Smrg{ 247720131375Smrg return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4, 24782ee35494Smrg -1, NULL, flags); 
247920131375Smrg}
248020131375Smrg
24816260e5d5Smrgdrm_public int
248220131375Smrgdrm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
248320131375Smrg			      int used, unsigned int flags)
248420131375Smrg{
24852ee35494Smrg	return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
24862ee35494Smrg}
24872ee35494Smrg
24886260e5d5Smrgdrm_public int
24892ee35494Smrgdrm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
24902ee35494Smrg			    drm_intel_context *ctx,
24912ee35494Smrg			    int used,
24922ee35494Smrg			    int in_fence,
24932ee35494Smrg			    int *out_fence,
24942ee35494Smrg			    unsigned int flags)
24952ee35494Smrg{
24962ee35494Smrg	return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
2497aaba2545Smrg}
2498aaba2545Smrg
249922944501Smrgstatic int
250022944501Smrgdrm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
250122944501Smrg{
250222944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
250322944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
250422944501Smrg	struct drm_i915_gem_pin pin;
250522944501Smrg	int ret;
250622944501Smrg
2507424e9256Smrg	memclear(pin);
250822944501Smrg	pin.handle = bo_gem->gem_handle;
250922944501Smrg	pin.alignment = alignment;
251022944501Smrg
25116d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
25126d98c517Smrg		       DRM_IOCTL_I915_GEM_PIN,
25136d98c517Smrg		       &pin);
251422944501Smrg	if (ret != 0)
251522944501Smrg		return -errno;
251622944501Smrg
251720131375Smrg	bo->offset64 = pin.offset;
251822944501Smrg	bo->offset = pin.offset;
251922944501Smrg	return 0;
252022944501Smrg}
252122944501Smrg
252222944501Smrgstatic int
252322944501Smrgdrm_intel_gem_bo_unpin(drm_intel_bo *bo)
252422944501Smrg{
252522944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
252622944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
252722944501Smrg	struct drm_i915_gem_unpin unpin;
252822944501Smrg	int ret;
252922944501Smrg
2530424e9256Smrg	memclear(unpin);
253122944501Smrg	unpin.handle = bo_gem->gem_handle;
253222944501Smrg
25336d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
253422944501Smrg	if (ret != 0)
253522944501Smrg		return -errno;
253622944501Smrg
253722944501Smrg	return 0;
253822944501Smrg}
253922944501Smrg
254022944501Smrgstatic int
25416d98c517Smrgdrm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
25426d98c517Smrg				     uint32_t tiling_mode,
25436d98c517Smrg				     uint32_t stride)
254422944501Smrg{
254522944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
254622944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
254722944501Smrg	struct drm_i915_gem_set_tiling set_tiling;
254822944501Smrg	int ret;
254922944501Smrg
25506d98c517Smrg	if (bo_gem->global_name == 0 &&
25516d98c517Smrg	    tiling_mode == bo_gem->tiling_mode &&
25526d98c517Smrg	    stride == bo_gem->stride)
255322944501Smrg		return 0;
255422944501Smrg
255522944501Smrg	memset(&set_tiling, 0, sizeof(set_tiling));
255622944501Smrg	do {
25576d98c517Smrg		/* set_tiling is slightly broken and overwrites the
25586d98c517Smrg		 * input on the error path, so we have to open code
25596d98c517Smrg		 * drmIoctl.
25606d98c517Smrg		 */
25616d98c517Smrg		set_tiling.handle = bo_gem->gem_handle;
25626d98c517Smrg		set_tiling.tiling_mode = tiling_mode;
256322944501Smrg		set_tiling.stride = stride;
256422944501Smrg
256522944501Smrg		ret = ioctl(bufmgr_gem->fd,
256622944501Smrg			    DRM_IOCTL_I915_GEM_SET_TILING,
256722944501Smrg			    &set_tiling);
25686d98c517Smrg	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
25696d98c517Smrg	if (ret == -1)
25706d98c517Smrg		return -errno;
25716d98c517Smrg
25726d98c517Smrg	bo_gem->tiling_mode = set_tiling.tiling_mode;
25736d98c517Smrg	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
25746d98c517Smrg	bo_gem->stride = set_tiling.stride;
25756d98c517Smrg	return 0;
25766d98c517Smrg}
25776d98c517Smrg
25786d98c517Smrgstatic int
25796d98c517Smrgdrm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
25806d98c517Smrg			    uint32_t stride)
25816d98c517Smrg{
25826d98c517Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
25836d98c517Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
25846d98c517Smrg	int ret;
25856d98c517Smrg
2586a884aba1Smrg	/* Tiling with userptr surfaces is not supported
2587a884aba1Smrg	 * on all hardware so refuse it for the time being.
2588a884aba1Smrg	 */
2589a884aba1Smrg	if (bo_gem->is_userptr)
2590a884aba1Smrg		return -EINVAL;
2591a884aba1Smrg
25926d98c517Smrg	/* Linear buffers have no stride. By ensuring that we only ever use
25936d98c517Smrg	 * stride 0 with linear buffers, we simplify our code.
25946d98c517Smrg	 */
25956d98c517Smrg	if (*tiling_mode == I915_TILING_NONE)
25966d98c517Smrg		stride = 0;
25976d98c517Smrg
25986d98c517Smrg	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
25996d98c517Smrg	if (ret == 0)
2600fe517fc9Smrg		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
260122944501Smrg
260222944501Smrg	*tiling_mode = bo_gem->tiling_mode;
2603aaba2545Smrg	return ret;
260422944501Smrg}
260522944501Smrg
260622944501Smrgstatic int
260722944501Smrgdrm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
260822944501Smrg			    uint32_t * swizzle_mode)
260922944501Smrg{
261022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
261122944501Smrg
261222944501Smrg	*tiling_mode = bo_gem->tiling_mode;
261322944501Smrg	*swizzle_mode = bo_gem->swizzle_mode;
261422944501Smrg	return 0;
261522944501Smrg}
261622944501Smrg
2617fe517fc9Smrgstatic int
2618fe517fc9Smrgdrm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
2619fe517fc9Smrg{
2620fe517fc9Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2621fe517fc9Smrg
2622fe517fc9Smrg	bo->offset64 = offset;
2623fe517fc9Smrg	bo->offset = offset;
26240655efefSmrg	bo_gem->kflags |= EXEC_OBJECT_PINNED;
26250655efefSmrg
2626fe517fc9Smrg	return 0;
2627fe517fc9Smrg}
2628fe517fc9Smrg
26296260e5d5Smrgdrm_public drm_intel_bo *
263020131375Smrgdrm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
263020131375Smrg{
263220131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
263320131375Smrg	int ret;
263420131375Smrg	uint32_t handle;
263520131375Smrg	drm_intel_bo_gem *bo_gem;
263620131375Smrg	struct drm_i915_gem_get_tiling get_tiling;
263720131375Smrg
2638fe517fc9Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
263920131375Smrg	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2640fe517fc9Smrg	if (ret) {
2641fe517fc9Smrg		DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
2642fe517fc9Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
2643fe517fc9Smrg
return NULL;
2644fe517fc9Smrg	}
264520131375Smrg
264620131375Smrg	/*
264720131375Smrg	 * See if the kernel has already returned this buffer to us. Just as
264820131375Smrg	 * for named buffers, we must not create two bo's pointing at the same
264920131375Smrg	 * kernel object.
265020131375Smrg	 */
26512ee35494Smrg	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
26522ee35494Smrg		  &handle, sizeof(handle), bo_gem);
26532ee35494Smrg	if (bo_gem) {
26542ee35494Smrg		drm_intel_gem_bo_reference(&bo_gem->bo);
26552ee35494Smrg		goto out;
265620131375Smrg	}
265720131375Smrg
265820131375Smrg	bo_gem = calloc(1, sizeof(*bo_gem));
26592ee35494Smrg	if (!bo_gem)
26602ee35494Smrg		goto out;
26612ee35494Smrg
26622ee35494Smrg	atomic_set(&bo_gem->refcount, 1);
26632ee35494Smrg	DRMINITLISTHEAD(&bo_gem->vma_list);
26642ee35494Smrg
266520131375Smrg	/* Determine size of bo. The fd-to-handle ioctl really should
266620131375Smrg	 * return the size, but it doesn't. If we have kernel 3.12 or
266720131375Smrg	 * later, we can lseek on the prime fd to get the size. Older
266820131375Smrg	 * kernels will just fail, in which case we fall back to the
266920131375Smrg	 * provided (estimated or guessed) size. */
267020131375Smrg	ret = lseek(prime_fd, 0, SEEK_END);
267120131375Smrg	if (ret != -1)
267220131375Smrg		bo_gem->bo.size = ret;
267320131375Smrg	else
267420131375Smrg		bo_gem->bo.size = size;
267520131375Smrg
267620131375Smrg	bo_gem->bo.handle = handle;
267720131375Smrg	bo_gem->bo.bufmgr = bufmgr;
267820131375Smrg
267920131375Smrg	bo_gem->gem_handle = handle;
26802ee35494Smrg	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
26812ee35494Smrg		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
268220131375Smrg
268320131375Smrg	bo_gem->name = "prime";
268420131375Smrg	bo_gem->validate_index = -1;
268520131375Smrg	bo_gem->reloc_tree_fences = 0;
268620131375Smrg	bo_gem->used_as_reloc_target = false;
268720131375Smrg	bo_gem->has_error = false;
268820131375Smrg	bo_gem->reusable = false;
268920131375Smrg
2690424e9256Smrg	memclear(get_tiling);
269120131375Smrg	get_tiling.handle = bo_gem->gem_handle;
26922ee35494Smrg	if (drmIoctl(bufmgr_gem->fd,
26932ee35494Smrg		     DRM_IOCTL_I915_GEM_GET_TILING,
26942ee35494Smrg		     &get_tiling))
26952ee35494Smrg		goto err;
26962ee35494Smrg
269720131375Smrg	bo_gem->tiling_mode = get_tiling.tiling_mode;
269820131375Smrg	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
269920131375Smrg	/* XXX stride is unknown */
2700fe517fc9Smrg	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
270120131375Smrg
27022ee35494Smrgout:
27032ee35494Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
270420131375Smrg	return &bo_gem->bo;
27052ee35494Smrg
27062ee35494Smrgerr:
27072ee35494Smrg	drm_intel_gem_bo_free(&bo_gem->bo);
27082ee35494Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
27092ee35494Smrg	return NULL;
271020131375Smrg}
271120131375Smrg
27126260e5d5Smrgdrm_public int
271320131375Smrgdrm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
271420131375Smrg{
271520131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
271620131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
271720131375Smrg
271820131375Smrg	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
271920131375Smrg			       DRM_CLOEXEC, prime_fd) != 0)
272020131375Smrg		return -errno;
272120131375Smrg
272220131375Smrg	bo_gem->reusable = false;
272320131375Smrg
272420131375Smrg	return 0;
272520131375Smrg}
272620131375Smrg
272722944501Smrgstatic int
272822944501Smrgdrm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
272922944501Smrg{
273022944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
273122944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
273222944501Smrg
273322944501Smrg	if (!bo_gem->global_name) {
273420131375Smrg		struct drm_gem_flink flink;
273520131375Smrg
2736424e9256Smrg		memclear(flink);
273722944501Smrg		flink.handle = bo_gem->gem_handle;
27382ee35494Smrg		if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
27392ee35494Smrg			return -errno;
274022944501Smrg
2741a884aba1Smrg		pthread_mutex_lock(&bufmgr_gem->lock);
27422ee35494Smrg		if (!bo_gem->global_name) {
27432ee35494Smrg			bo_gem->global_name = flink.name;
27442ee35494Smrg			bo_gem->reusable = false;
2745a884aba1Smrg
27462ee35494Smrg			HASH_ADD(name_hh, bufmgr_gem->name_table,
27472ee35494Smrg				 global_name, sizeof(bo_gem->global_name),
27482ee35494Smrg				 bo_gem);
2749a884aba1Smrg		}
2750a884aba1Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
275122944501Smrg	}
275222944501Smrg
275322944501Smrg	*name = bo_gem->global_name;
275422944501Smrg	return 0;
275522944501Smrg}
275622944501Smrg
275722944501Smrg/**
275822944501Smrg * Enables unlimited caching of buffer objects for reuse.
275922944501Smrg *
276022944501Smrg * This is potentially very memory expensive, as the cache at each bucket
276122944501Smrg * size is only bounded by how many buffers of that size we've managed to have
276222944501Smrg * in flight at once.
276322944501Smrg */
27646260e5d5Smrgdrm_public void
276522944501Smrgdrm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
276622944501Smrg{
276722944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
276822944501Smrg
276920131375Smrg	bufmgr_gem->bo_reuse = true;
277022944501Smrg}
277122944501Smrg
27722ee35494Smrg/**
27732ee35494Smrg * Disables implicit synchronisation before executing the bo
27742ee35494Smrg *
27752ee35494Smrg * This will cause rendering corruption unless you correctly manage explicit
27762ee35494Smrg * fences for all rendering involving this buffer - including use by others.
27772ee35494Smrg * Disabling the implicit serialisation is only required if that serialisation
27782ee35494Smrg * is too coarse (for example, you have split the buffer into many
27792ee35494Smrg * non-overlapping regions and are sharing the whole buffer between concurrent
27802ee35494Smrg * independent command streams).
27812ee35494Smrg *
27822ee35494Smrg * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
27832ee35494Smrg * which can be checked using drm_intel_bufmgr_gem_can_disable_implicit_sync,
27842ee35494Smrg * or subsequent execbufs involving the bo will generate EINVAL.
27852ee35494Smrg */
27866260e5d5Smrgdrm_public void
27872ee35494Smrgdrm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
27882ee35494Smrg{
27892ee35494Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
27902ee35494Smrg
27912ee35494Smrg	bo_gem->kflags |= EXEC_OBJECT_ASYNC;
27922ee35494Smrg}
27932ee35494Smrg
27942ee35494Smrg/**
27952ee35494Smrg * Enables implicit synchronisation before executing the bo
27962ee35494Smrg *
27972ee35494Smrg * This is the default behaviour of the kernel, to wait upon prior writes
27982ee35494Smrg * completing on the object before rendering with it, or to wait for prior
27992ee35494Smrg * reads to complete before writing into the object.
28002ee35494Smrg * drm_intel_gem_bo_disable_implicit_sync() can stop this behaviour, telling
28012ee35494Smrg * the kernel never to insert a stall before using the object.
Then this
28022ee35494Smrg * function can be used to restore the implicit sync before subsequent
28032ee35494Smrg * rendering.
28042ee35494Smrg */
28056260e5d5Smrgdrm_public void
28062ee35494Smrgdrm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
28072ee35494Smrg{
28082ee35494Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
28092ee35494Smrg
28102ee35494Smrg	bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
28112ee35494Smrg}
28122ee35494Smrg
28132ee35494Smrg/**
28142ee35494Smrg * Query whether the kernel supports disabling of its implicit synchronisation
28152ee35494Smrg * before execbuf. See drm_intel_gem_bo_disable_implicit_sync().
28162ee35494Smrg */
28176260e5d5Smrgdrm_public int
28182ee35494Smrgdrm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
28192ee35494Smrg{
28202ee35494Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
28212ee35494Smrg
28222ee35494Smrg	return bufmgr_gem->has_exec_async;
28232ee35494Smrg}
28242ee35494Smrg
282522944501Smrg/**
282622944501Smrg * Enable use of fenced reloc type.
282722944501Smrg *
282822944501Smrg * New code should enable this to avoid unnecessary fence register
282922944501Smrg * allocation. If this option is not enabled, all relocs will have a fence
283022944501Smrg * register allocated.
283122944501Smrg */
28326260e5d5Smrgdrm_public void
283322944501Smrgdrm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
283422944501Smrg{
283522944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
283622944501Smrg
283722944501Smrg	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
283820131375Smrg		bufmgr_gem->fenced_relocs = true;
283922944501Smrg}
284022944501Smrg
284122944501Smrg/**
284222944501Smrg * Return the additional aperture space required by the tree of buffer objects
284322944501Smrg * rooted at bo.
284422944501Smrg */
284522944501Smrgstatic int
284622944501Smrgdrm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
284722944501Smrg{
284822944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
284922944501Smrg	int i;
285022944501Smrg	int total = 0;
285122944501Smrg
285222944501Smrg	if (bo == NULL || bo_gem->included_in_check_aperture)
285322944501Smrg		return 0;
285422944501Smrg
285522944501Smrg	total += bo->size;
285620131375Smrg	bo_gem->included_in_check_aperture = true;
285722944501Smrg
285822944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++)
285922944501Smrg		total +=
286022944501Smrg		    drm_intel_gem_bo_get_aperture_space(bo_gem->
286122944501Smrg							reloc_target_info[i].bo);
286222944501Smrg
286322944501Smrg	return total;
286422944501Smrg}
286522944501Smrg
286622944501Smrg/**
286722944501Smrg * Count the number of buffers in this list that need a fence reg
286822944501Smrg *
286922944501Smrg * If the count is greater than the number of available regs, we'll have
287022944501Smrg * to ask the caller to resubmit a batch with fewer tiled buffers.
287122944501Smrg *
287222944501Smrg * This function over-counts if the same buffer is used multiple times.
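 *
 * Illustrative sketch (the values are assumptions, not from the original
 * text): if two entries in bo_array each pull in the same tiled target
 * whose reloc_tree_fences is 1, the sum below reports 2 even though a
 * single fence register would be enough at execution time.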
287322944501Smrg */ 287422944501Smrgstatic unsigned int 287522944501Smrgdrm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count) 287622944501Smrg{ 287722944501Smrg int i; 287822944501Smrg unsigned int total = 0; 287922944501Smrg 288022944501Smrg for (i = 0; i < count; i++) { 288122944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i]; 288222944501Smrg 288322944501Smrg if (bo_gem == NULL) 288422944501Smrg continue; 288522944501Smrg 288622944501Smrg total += bo_gem->reloc_tree_fences; 288722944501Smrg } 288822944501Smrg return total; 288922944501Smrg} 289022944501Smrg 289122944501Smrg/** 289222944501Smrg * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready 289322944501Smrg * for the next drm_intel_bufmgr_check_aperture_space() call. 289422944501Smrg */ 289522944501Smrgstatic void 289622944501Smrgdrm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo) 289722944501Smrg{ 289822944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 289922944501Smrg int i; 290022944501Smrg 290122944501Smrg if (bo == NULL || !bo_gem->included_in_check_aperture) 290222944501Smrg return; 290322944501Smrg 290420131375Smrg bo_gem->included_in_check_aperture = false; 290522944501Smrg 290622944501Smrg for (i = 0; i < bo_gem->reloc_count; i++) 290722944501Smrg drm_intel_gem_bo_clear_aperture_space_flag(bo_gem-> 290822944501Smrg reloc_target_info[i].bo); 290922944501Smrg} 291022944501Smrg 291122944501Smrg/** 291222944501Smrg * Return a conservative estimate for the amount of aperture required 291322944501Smrg * for a collection of buffers. This may double-count some buffers. 291422944501Smrg */ 291522944501Smrgstatic unsigned int 291622944501Smrgdrm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count) 291722944501Smrg{ 291822944501Smrg int i; 291922944501Smrg unsigned int total = 0; 292022944501Smrg 292122944501Smrg for (i = 0; i < count; i++) { 292222944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i]; 292322944501Smrg if (bo_gem != NULL) 292422944501Smrg total += bo_gem->reloc_tree_size; 292522944501Smrg } 292622944501Smrg return total; 292722944501Smrg} 292822944501Smrg 292922944501Smrg/** 293022944501Smrg * Return the amount of aperture needed for a collection of buffers. 293122944501Smrg * This avoids double counting any buffers, at the cost of looking 293222944501Smrg * at every buffer in the set. 293322944501Smrg */ 293422944501Smrgstatic unsigned int 293522944501Smrgdrm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count) 293622944501Smrg{ 293722944501Smrg int i; 293822944501Smrg unsigned int total = 0; 293922944501Smrg 294022944501Smrg for (i = 0; i < count; i++) { 294122944501Smrg total += drm_intel_gem_bo_get_aperture_space(bo_array[i]); 294222944501Smrg /* For the first buffer object in the array, we get an 294322944501Smrg * accurate count back for its reloc_tree size (since nothing 294422944501Smrg * had been flagged as being counted yet). We can save that 294522944501Smrg * value out as a more conservative reloc_tree_size that 294622944501Smrg * avoids double-counting target buffers. Since the first 294722944501Smrg * buffer happens to usually be the batch buffer in our 294822944501Smrg * callers, this can pull us back from doing the tree 294922944501Smrg * walk on every new batch emit. 
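	 *
	 * As a sketch of an assumed call pattern: with bo_array =
	 * {batch, a, b} where batch relocates to both a and b, the
	 * i == 0 pass walks the whole tree exactly once, and that total
	 * is cached below as the batch bo's reloc_tree_size for cheaper
	 * estimates on later submissions.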
295022944501Smrg */ 295122944501Smrg if (i == 0) { 295222944501Smrg drm_intel_bo_gem *bo_gem = 295322944501Smrg (drm_intel_bo_gem *) bo_array[i]; 295422944501Smrg bo_gem->reloc_tree_size = total; 295522944501Smrg } 295622944501Smrg } 295722944501Smrg 295822944501Smrg for (i = 0; i < count; i++) 295922944501Smrg drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]); 296022944501Smrg return total; 296122944501Smrg} 296222944501Smrg 296322944501Smrg/** 296422944501Smrg * Return -1 if the batchbuffer should be flushed before attempting to 296522944501Smrg * emit rendering referencing the buffers pointed to by bo_array. 296622944501Smrg * 296722944501Smrg * This is required because if we try to emit a batchbuffer with relocations 296822944501Smrg * to a tree of buffers that won't simultaneously fit in the aperture, 296922944501Smrg * the rendering will return an error at a point where the software is not 297022944501Smrg * prepared to recover from it. 297122944501Smrg * 297222944501Smrg * However, we also want to emit the batchbuffer significantly before we reach 297322944501Smrg * the limit, as a series of batchbuffers each of which references buffers 297422944501Smrg * covering almost all of the aperture means that at each emit we end up 297522944501Smrg * waiting to evict a buffer from the last rendering, and we get synchronous 297622944501Smrg * performance. By emitting smaller batchbuffers, we eat some CPU overhead to 297722944501Smrg * get better parallelism. 297822944501Smrg */ 297922944501Smrgstatic int 298022944501Smrgdrm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count) 298122944501Smrg{ 298222944501Smrg drm_intel_bufmgr_gem *bufmgr_gem = 298322944501Smrg (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr; 298422944501Smrg unsigned int total = 0; 298522944501Smrg unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4; 298622944501Smrg int total_fences; 298722944501Smrg 298822944501Smrg /* Check for fence reg constraints if necessary */ 298922944501Smrg if (bufmgr_gem->available_fences) { 299022944501Smrg total_fences = drm_intel_gem_total_fences(bo_array, count); 299122944501Smrg if (total_fences > bufmgr_gem->available_fences) 299222944501Smrg return -ENOSPC; 299322944501Smrg } 299422944501Smrg 299522944501Smrg total = drm_intel_gem_estimate_batch_space(bo_array, count); 299622944501Smrg 299722944501Smrg if (total > threshold) 299822944501Smrg total = drm_intel_gem_compute_batch_space(bo_array, count); 299922944501Smrg 300022944501Smrg if (total > threshold) { 300122944501Smrg DBG("check_space: overflowed available aperture, " 300222944501Smrg "%dkb vs %dkb\n", 300322944501Smrg total / 1024, (int)bufmgr_gem->gtt_size / 1024); 300422944501Smrg return -ENOSPC; 300522944501Smrg } else { 300622944501Smrg DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024, 300722944501Smrg (int)bufmgr_gem->gtt_size / 1024); 300822944501Smrg return 0; 300922944501Smrg } 301022944501Smrg} 301122944501Smrg 301222944501Smrg/* 301322944501Smrg * Disable buffer reuse for objects which are shared with the kernel 301422944501Smrg * as scanout buffers 301522944501Smrg */ 301622944501Smrgstatic int 301722944501Smrgdrm_intel_gem_bo_disable_reuse(drm_intel_bo *bo) 301822944501Smrg{ 301922944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 302022944501Smrg 302120131375Smrg bo_gem->reusable = false; 302222944501Smrg return 0; 302322944501Smrg} 302422944501Smrg 3025aaba2545Smrgstatic int 3026aaba2545Smrgdrm_intel_gem_bo_is_reusable(drm_intel_bo *bo) 3027aaba2545Smrg{ 3028aaba2545Smrg 
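	/* Reuse is cleared whenever the handle escapes to another owner:
	 * flink naming, prime import/export, or an explicit
	 * drm_intel_gem_bo_disable_reuse() call above. */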
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 3029aaba2545Smrg 3030aaba2545Smrg return bo_gem->reusable; 3031aaba2545Smrg} 3032aaba2545Smrg 303322944501Smrgstatic int 303422944501Smrg_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) 303522944501Smrg{ 303622944501Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 303722944501Smrg int i; 303822944501Smrg 303922944501Smrg for (i = 0; i < bo_gem->reloc_count; i++) { 304022944501Smrg if (bo_gem->reloc_target_info[i].bo == target_bo) 304122944501Smrg return 1; 3042aaba2545Smrg if (bo == bo_gem->reloc_target_info[i].bo) 3043aaba2545Smrg continue; 304422944501Smrg if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo, 304522944501Smrg target_bo)) 304622944501Smrg return 1; 304722944501Smrg } 304822944501Smrg 3049fe517fc9Smrg for (i = 0; i< bo_gem->softpin_target_count; i++) { 3050fe517fc9Smrg if (bo_gem->softpin_target[i] == target_bo) 3051fe517fc9Smrg return 1; 3052fe517fc9Smrg if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo)) 3053fe517fc9Smrg return 1; 3054fe517fc9Smrg } 3055fe517fc9Smrg 305622944501Smrg return 0; 305722944501Smrg} 305822944501Smrg 305922944501Smrg/** Return true if target_bo is referenced by bo's relocation tree. */ 306022944501Smrgstatic int 306122944501Smrgdrm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) 306222944501Smrg{ 306322944501Smrg drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; 306422944501Smrg 306522944501Smrg if (bo == NULL || target_bo == NULL) 306622944501Smrg return 0; 306722944501Smrg if (target_bo_gem->used_as_reloc_target) 306822944501Smrg return _drm_intel_gem_bo_references(bo, target_bo); 306922944501Smrg return 0; 307022944501Smrg} 307122944501Smrg 3072aaba2545Smrgstatic void 3073aaba2545Smrgadd_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size) 3074aaba2545Smrg{ 3075aaba2545Smrg unsigned int i = bufmgr_gem->num_buckets; 3076aaba2545Smrg 3077aaba2545Smrg assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket)); 3078aaba2545Smrg 3079aaba2545Smrg DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head); 3080aaba2545Smrg bufmgr_gem->cache_bucket[i].size = size; 3081aaba2545Smrg bufmgr_gem->num_buckets++; 3082aaba2545Smrg} 3083aaba2545Smrg 3084aaba2545Smrgstatic void 3085aaba2545Smrginit_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem) 3086aaba2545Smrg{ 3087aaba2545Smrg unsigned long size, cache_max_size = 64 * 1024 * 1024; 3088aaba2545Smrg 3089aaba2545Smrg /* OK, so power of two buckets was too wasteful of memory. 3090aaba2545Smrg * Give 3 other sizes between each power of two, to hopefully 3091aaba2545Smrg * cover things accurately enough. (The alternative is 3092aaba2545Smrg * probably to just go for exact matching of sizes, and assume 3093aaba2545Smrg * that for things like composited window resize the tiled 3094aaba2545Smrg * width/height alignment and rounding of sizes to pages will 3095aaba2545Smrg * get us useful cache hit rates anyway) 3096aaba2545Smrg */ 3097aaba2545Smrg add_bucket(bufmgr_gem, 4096); 3098aaba2545Smrg add_bucket(bufmgr_gem, 4096 * 2); 3099aaba2545Smrg add_bucket(bufmgr_gem, 4096 * 3); 3100aaba2545Smrg 3101aaba2545Smrg /* Initialize the linked lists for BO reuse cache. 
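	 *
	 * Together with the 4KB/8KB/12KB buckets above, the loop below
	 * (16KB up to 64MB, four buckets per power of two) fills
	 * 3 + 13 * 4 = 55 of the 14 * 4 cache_bucket[] slots.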
*/ 3102aaba2545Smrg for (size = 4 * 4096; size <= cache_max_size; size *= 2) { 3103aaba2545Smrg add_bucket(bufmgr_gem, size); 3104aaba2545Smrg 3105aaba2545Smrg add_bucket(bufmgr_gem, size + size * 1 / 4); 3106aaba2545Smrg add_bucket(bufmgr_gem, size + size * 2 / 4); 3107aaba2545Smrg add_bucket(bufmgr_gem, size + size * 3 / 4); 3108aaba2545Smrg } 3109aaba2545Smrg} 3110aaba2545Smrg 31116260e5d5Smrgdrm_public void 311220131375Smrgdrm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit) 311320131375Smrg{ 311420131375Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 311520131375Smrg 311620131375Smrg bufmgr_gem->vma_max = limit; 311720131375Smrg 311820131375Smrg drm_intel_gem_bo_purge_vma_cache(bufmgr_gem); 311920131375Smrg} 312020131375Smrg 31212ee35494Smrgstatic int 31222ee35494Smrgparse_devid_override(const char *devid_override) 31232ee35494Smrg{ 31242ee35494Smrg static const struct { 31252ee35494Smrg const char *name; 31262ee35494Smrg int pci_id; 31272ee35494Smrg } name_map[] = { 31282ee35494Smrg { "brw", PCI_CHIP_I965_GM }, 31292ee35494Smrg { "g4x", PCI_CHIP_GM45_GM }, 31302ee35494Smrg { "ilk", PCI_CHIP_ILD_G }, 31312ee35494Smrg { "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS }, 31322ee35494Smrg { "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 }, 31332ee35494Smrg { "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 }, 31342ee35494Smrg { "byt", PCI_CHIP_VALLEYVIEW_3 }, 31352ee35494Smrg { "bdw", 0x1620 | BDW_ULX }, 31362ee35494Smrg { "skl", PCI_CHIP_SKYLAKE_DT_GT2 }, 31372ee35494Smrg { "kbl", PCI_CHIP_KABYLAKE_DT_GT2 }, 31382ee35494Smrg }; 31392ee35494Smrg unsigned int i; 31402ee35494Smrg 31412ee35494Smrg for (i = 0; i < ARRAY_SIZE(name_map); i++) { 31422ee35494Smrg if (!strcmp(name_map[i].name, devid_override)) 31432ee35494Smrg return name_map[i].pci_id; 31442ee35494Smrg } 31452ee35494Smrg 31462ee35494Smrg return strtod(devid_override, NULL); 31472ee35494Smrg} 31482ee35494Smrg 314920131375Smrg/** 315020131375Smrg * Get the PCI ID for the device. This can be overridden by setting the 315120131375Smrg * INTEL_DEVID_OVERRIDE environment variable to the desired ID. 315220131375Smrg */ 315320131375Smrgstatic int 315420131375Smrgget_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem) 315520131375Smrg{ 315620131375Smrg char *devid_override; 3157424e9256Smrg int devid = 0; 315820131375Smrg int ret; 315920131375Smrg drm_i915_getparam_t gp; 316020131375Smrg 316120131375Smrg if (geteuid() == getuid()) { 316220131375Smrg devid_override = getenv("INTEL_DEVID_OVERRIDE"); 316320131375Smrg if (devid_override) { 316420131375Smrg bufmgr_gem->no_exec = true; 31652ee35494Smrg return parse_devid_override(devid_override); 316620131375Smrg } 316720131375Smrg } 316820131375Smrg 3169424e9256Smrg memclear(gp); 317020131375Smrg gp.param = I915_PARAM_CHIPSET_ID; 317120131375Smrg gp.value = &devid; 317220131375Smrg ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 317320131375Smrg if (ret) { 317420131375Smrg fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno); 317520131375Smrg fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value); 317620131375Smrg } 317720131375Smrg return devid; 317820131375Smrg} 317920131375Smrg 31806260e5d5Smrgdrm_public int 318120131375Smrgdrm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr) 318220131375Smrg{ 318320131375Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 318420131375Smrg 318520131375Smrg return bufmgr_gem->pci_device; 318620131375Smrg} 318720131375Smrg 318820131375Smrg/** 318920131375Smrg * Sets the AUB filename. 
319020131375Smrg * 319120131375Smrg * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump() 319220131375Smrg * for it to have any effect. 319320131375Smrg */ 31946260e5d5Smrgdrm_public void 319520131375Smrgdrm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr, 319620131375Smrg const char *filename) 319720131375Smrg{ 319820131375Smrg} 319920131375Smrg 320020131375Smrg/** 320120131375Smrg * Sets up AUB dumping. 320220131375Smrg * 320320131375Smrg * This is a trace file format that can be used with the simulator. 320420131375Smrg * Packets are emitted in a format somewhat like GPU command packets. 320520131375Smrg * You can set up a GTT and upload your objects into the referenced 320620131375Smrg * space, then send off batchbuffers and get BMPs out the other end. 320720131375Smrg */ 32086260e5d5Smrgdrm_public void 320920131375Smrgdrm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable) 321020131375Smrg{ 3211fe517fc9Smrg fprintf(stderr, "libdrm aub dumping is deprecated.\n\n" 3212fe517fc9Smrg "Use intel_aubdump from intel-gpu-tools instead. Install intel-gpu-tools,\n" 3213fe517fc9Smrg "then run (for example)\n\n" 3214fe517fc9Smrg "\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n" 3215fe517fc9Smrg "See the intel_aubdump man page for more details.\n"); 321620131375Smrg} 321720131375Smrg 32186260e5d5Smrgdrm_public drm_intel_context * 321920131375Smrgdrm_intel_gem_context_create(drm_intel_bufmgr *bufmgr) 322020131375Smrg{ 322120131375Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 322220131375Smrg struct drm_i915_gem_context_create create; 322320131375Smrg drm_intel_context *context = NULL; 322420131375Smrg int ret; 322520131375Smrg 322620131375Smrg context = calloc(1, sizeof(*context)); 322720131375Smrg if (!context) 322820131375Smrg return NULL; 322920131375Smrg 3230424e9256Smrg memclear(create); 323120131375Smrg ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create); 323220131375Smrg if (ret != 0) { 323320131375Smrg DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", 323420131375Smrg strerror(errno)); 323520131375Smrg free(context); 323620131375Smrg return NULL; 323720131375Smrg } 323820131375Smrg 323920131375Smrg context->ctx_id = create.ctx_id; 324020131375Smrg context->bufmgr = bufmgr; 324120131375Smrg 324220131375Smrg return context; 324320131375Smrg} 324420131375Smrg 32456260e5d5Smrgdrm_public int 32462ee35494Smrgdrm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id) 32472ee35494Smrg{ 32482ee35494Smrg if (ctx == NULL) 32492ee35494Smrg return -EINVAL; 32502ee35494Smrg 32512ee35494Smrg *ctx_id = ctx->ctx_id; 32522ee35494Smrg 32532ee35494Smrg return 0; 32542ee35494Smrg} 32552ee35494Smrg 32566260e5d5Smrgdrm_public void 325720131375Smrgdrm_intel_gem_context_destroy(drm_intel_context *ctx) 325820131375Smrg{ 325920131375Smrg drm_intel_bufmgr_gem *bufmgr_gem; 326020131375Smrg struct drm_i915_gem_context_destroy destroy; 326120131375Smrg int ret; 326220131375Smrg 326320131375Smrg if (ctx == NULL) 326420131375Smrg return; 326520131375Smrg 3266424e9256Smrg memclear(destroy); 326720131375Smrg 326820131375Smrg bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr; 326920131375Smrg destroy.ctx_id = ctx->ctx_id; 327020131375Smrg ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, 327120131375Smrg &destroy); 327220131375Smrg if (ret != 0) 327320131375Smrg fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n", 327420131375Smrg strerror(errno)); 327520131375Smrg 
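	/* Free the userspace wrapper unconditionally; a failed destroy
	 * ioctl is only reported above, not propagated to the caller. */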
327620131375Smrg	free(ctx);
327720131375Smrg}
327820131375Smrg
32796260e5d5Smrgdrm_public int
328020131375Smrgdrm_intel_get_reset_stats(drm_intel_context *ctx,
328120131375Smrg			  uint32_t *reset_count,
328220131375Smrg			  uint32_t *active,
328320131375Smrg			  uint32_t *pending)
328420131375Smrg{
328520131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
328620131375Smrg	struct drm_i915_reset_stats stats;
328720131375Smrg	int ret;
328820131375Smrg
328920131375Smrg	if (ctx == NULL)
329020131375Smrg		return -EINVAL;
329120131375Smrg
3292424e9256Smrg	memclear(stats);
329320131375Smrg
329420131375Smrg	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
329520131375Smrg	stats.ctx_id = ctx->ctx_id;
329620131375Smrg	ret = drmIoctl(bufmgr_gem->fd,
329720131375Smrg		       DRM_IOCTL_I915_GET_RESET_STATS,
329820131375Smrg		       &stats);
329920131375Smrg	if (ret == 0) {
330020131375Smrg		if (reset_count != NULL)
330120131375Smrg			*reset_count = stats.reset_count;
330220131375Smrg
330320131375Smrg		if (active != NULL)
330420131375Smrg			*active = stats.batch_active;
330520131375Smrg
330620131375Smrg		if (pending != NULL)
330720131375Smrg			*pending = stats.batch_pending;
330820131375Smrg	}
330920131375Smrg
331020131375Smrg	return ret;
331120131375Smrg}
331220131375Smrg
33136260e5d5Smrgdrm_public int
331420131375Smrgdrm_intel_reg_read(drm_intel_bufmgr *bufmgr,
331520131375Smrg		   uint32_t offset,
331620131375Smrg		   uint64_t *result)
331720131375Smrg{
331820131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
331920131375Smrg	struct drm_i915_reg_read reg_read;
332020131375Smrg	int ret;
332120131375Smrg
3322424e9256Smrg	memclear(reg_read);
332320131375Smrg	reg_read.offset = offset;
332420131375Smrg
332520131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
332620131375Smrg
332720131375Smrg	*result = reg_read.val;
332820131375Smrg	return ret;
332920131375Smrg}
333020131375Smrg
33316260e5d5Smrgdrm_public int
3332424e9256Smrgdrm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
3333424e9256Smrg{
3334424e9256Smrg	drm_i915_getparam_t gp;
3335424e9256Smrg	int ret;
3336424e9256Smrg
3337424e9256Smrg	memclear(gp);
3338424e9256Smrg	gp.value = (int*)subslice_total;
3339424e9256Smrg	gp.param = I915_PARAM_SUBSLICE_TOTAL;
3340424e9256Smrg	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3341424e9256Smrg	if (ret)
3342424e9256Smrg		return -errno;
3343424e9256Smrg
3344424e9256Smrg	return 0;
3345424e9256Smrg}
3346424e9256Smrg
33476260e5d5Smrgdrm_public int
3348424e9256Smrgdrm_intel_get_eu_total(int fd, unsigned int *eu_total)
3349424e9256Smrg{
3350424e9256Smrg	drm_i915_getparam_t gp;
3351424e9256Smrg	int ret;
3352424e9256Smrg
3353424e9256Smrg	memclear(gp);
3354424e9256Smrg	gp.value = (int*)eu_total;
3355424e9256Smrg	gp.param = I915_PARAM_EU_TOTAL;
3356424e9256Smrg	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3357424e9256Smrg	if (ret)
3358424e9256Smrg		return -errno;
3359424e9256Smrg
3360424e9256Smrg	return 0;
3361424e9256Smrg}
336220131375Smrg
33636260e5d5Smrgdrm_public int
33642ee35494Smrgdrm_intel_get_pooled_eu(int fd)
33652ee35494Smrg{
33662ee35494Smrg	drm_i915_getparam_t gp;
33672ee35494Smrg	int ret = -1;
33682ee35494Smrg
33692ee35494Smrg	memclear(gp);
33702ee35494Smrg	gp.param = I915_PARAM_HAS_POOLED_EU;
33712ee35494Smrg	gp.value = &ret;
33722ee35494Smrg	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
33732ee35494Smrg		return -errno;
33742ee35494Smrg
33752ee35494Smrg	return ret;
33762ee35494Smrg}
33772ee35494Smrg
33786260e5d5Smrgdrm_public int
33792ee35494Smrgdrm_intel_get_min_eu_in_pool(int fd)
33802ee35494Smrg{ 33812ee35494Smrg drm_i915_getparam_t gp; 33822ee35494Smrg int ret = -1; 33832ee35494Smrg 33842ee35494Smrg memclear(gp); 33852ee35494Smrg gp.param = I915_PARAM_MIN_EU_IN_POOL; 33862ee35494Smrg gp.value = &ret; 33872ee35494Smrg if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp)) 33882ee35494Smrg return -errno; 33892ee35494Smrg 33902ee35494Smrg return ret; 33912ee35494Smrg} 33922ee35494Smrg 339320131375Smrg/** 339420131375Smrg * Annotate the given bo for use in aub dumping. 339520131375Smrg * 339620131375Smrg * \param annotations is an array of drm_intel_aub_annotation objects 339720131375Smrg * describing the type of data in various sections of the bo. Each 339820131375Smrg * element of the array specifies the type and subtype of a section of 339920131375Smrg * the bo, and the past-the-end offset of that section. The elements 340020131375Smrg * of \c annotations must be sorted so that ending_offset is 340120131375Smrg * increasing. 340220131375Smrg * 340320131375Smrg * \param count is the number of elements in the \c annotations array. 340420131375Smrg * If \c count is zero, then \c annotations will not be dereferenced. 340520131375Smrg * 340620131375Smrg * Annotations are copied into a private data structure, so caller may 340720131375Smrg * re-use the memory pointed to by \c annotations after the call 340820131375Smrg * returns. 340920131375Smrg * 341020131375Smrg * Annotations are stored for the lifetime of the bo; to reset to the 341120131375Smrg * default state (no annotations), call this function with a \c count 341220131375Smrg * of zero. 341320131375Smrg */ 34146260e5d5Smrgdrm_public void drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo, 341520131375Smrg drm_intel_aub_annotation *annotations, 341620131375Smrg unsigned count) 341720131375Smrg{ 341820131375Smrg} 341920131375Smrg 3420a884aba1Smrgstatic pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER; 3421a884aba1Smrgstatic drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list }; 3422a884aba1Smrg 3423a884aba1Smrgstatic drm_intel_bufmgr_gem * 3424a884aba1Smrgdrm_intel_bufmgr_gem_find(int fd) 3425a884aba1Smrg{ 3426a884aba1Smrg drm_intel_bufmgr_gem *bufmgr_gem; 3427a884aba1Smrg 3428a884aba1Smrg DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) { 3429a884aba1Smrg if (bufmgr_gem->fd == fd) { 3430a884aba1Smrg atomic_inc(&bufmgr_gem->refcount); 3431a884aba1Smrg return bufmgr_gem; 3432a884aba1Smrg } 3433a884aba1Smrg } 3434a884aba1Smrg 3435a884aba1Smrg return NULL; 3436a884aba1Smrg} 3437a884aba1Smrg 3438a884aba1Smrgstatic void 3439a884aba1Smrgdrm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr) 3440a884aba1Smrg{ 3441a884aba1Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 3442a884aba1Smrg 3443a884aba1Smrg if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) { 3444a884aba1Smrg pthread_mutex_lock(&bufmgr_list_mutex); 3445a884aba1Smrg 3446a884aba1Smrg if (atomic_dec_and_test(&bufmgr_gem->refcount)) { 3447a884aba1Smrg DRMLISTDEL(&bufmgr_gem->managers); 3448a884aba1Smrg drm_intel_bufmgr_gem_destroy(bufmgr); 3449a884aba1Smrg } 3450a884aba1Smrg 3451a884aba1Smrg pthread_mutex_unlock(&bufmgr_list_mutex); 3452a884aba1Smrg } 3453a884aba1Smrg} 3454a884aba1Smrg 34556260e5d5Smrgdrm_public void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo) 34562ee35494Smrg{ 34572ee35494Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 34582ee35494Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 34592ee35494Smrg 34602ee35494Smrg if (bo_gem->gtt_virtual) 34612ee35494Smrg return 
bo_gem->gtt_virtual; 34622ee35494Smrg 34632ee35494Smrg if (bo_gem->is_userptr) 34642ee35494Smrg return NULL; 34652ee35494Smrg 34662ee35494Smrg pthread_mutex_lock(&bufmgr_gem->lock); 34672ee35494Smrg if (bo_gem->gtt_virtual == NULL) { 34682ee35494Smrg struct drm_i915_gem_mmap_gtt mmap_arg; 34692ee35494Smrg void *ptr; 34702ee35494Smrg 34712ee35494Smrg DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n", 34722ee35494Smrg bo_gem->gem_handle, bo_gem->name, bo_gem->map_count); 34732ee35494Smrg 34742ee35494Smrg if (bo_gem->map_count++ == 0) 34752ee35494Smrg drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem); 34762ee35494Smrg 34772ee35494Smrg memclear(mmap_arg); 34782ee35494Smrg mmap_arg.handle = bo_gem->gem_handle; 34792ee35494Smrg 34802ee35494Smrg /* Get the fake offset back... */ 34812ee35494Smrg ptr = MAP_FAILED; 34822ee35494Smrg if (drmIoctl(bufmgr_gem->fd, 34832ee35494Smrg DRM_IOCTL_I915_GEM_MMAP_GTT, 34842ee35494Smrg &mmap_arg) == 0) { 34852ee35494Smrg /* and mmap it */ 34862ee35494Smrg ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, 34872ee35494Smrg MAP_SHARED, bufmgr_gem->fd, 34882ee35494Smrg mmap_arg.offset); 34892ee35494Smrg } 34902ee35494Smrg if (ptr == MAP_FAILED) { 34912ee35494Smrg if (--bo_gem->map_count == 0) 34922ee35494Smrg drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 34932ee35494Smrg ptr = NULL; 34942ee35494Smrg } 34952ee35494Smrg 34962ee35494Smrg bo_gem->gtt_virtual = ptr; 34972ee35494Smrg } 34982ee35494Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 34992ee35494Smrg 35002ee35494Smrg return bo_gem->gtt_virtual; 35012ee35494Smrg} 35022ee35494Smrg 35036260e5d5Smrgdrm_public void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo) 35042ee35494Smrg{ 35052ee35494Smrg drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 35062ee35494Smrg drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 35072ee35494Smrg 35082ee35494Smrg if (bo_gem->mem_virtual) 35092ee35494Smrg return bo_gem->mem_virtual; 35102ee35494Smrg 35112ee35494Smrg if (bo_gem->is_userptr) { 35122ee35494Smrg /* Return the same user ptr */ 35132ee35494Smrg return bo_gem->user_virtual; 35142ee35494Smrg } 35152ee35494Smrg 35162ee35494Smrg pthread_mutex_lock(&bufmgr_gem->lock); 35172ee35494Smrg if (!bo_gem->mem_virtual) { 35182ee35494Smrg struct drm_i915_gem_mmap mmap_arg; 35192ee35494Smrg 35202ee35494Smrg if (bo_gem->map_count++ == 0) 35212ee35494Smrg drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem); 35222ee35494Smrg 35232ee35494Smrg DBG("bo_map: %d (%s), map_count=%d\n", 35242ee35494Smrg bo_gem->gem_handle, bo_gem->name, bo_gem->map_count); 35252ee35494Smrg 35262ee35494Smrg memclear(mmap_arg); 35272ee35494Smrg mmap_arg.handle = bo_gem->gem_handle; 35282ee35494Smrg mmap_arg.size = bo->size; 35292ee35494Smrg if (drmIoctl(bufmgr_gem->fd, 35302ee35494Smrg DRM_IOCTL_I915_GEM_MMAP, 35312ee35494Smrg &mmap_arg)) { 35322ee35494Smrg DBG("%s:%d: Error mapping buffer %d (%s): %s .\n", 35332ee35494Smrg __FILE__, __LINE__, bo_gem->gem_handle, 35342ee35494Smrg bo_gem->name, strerror(errno)); 35352ee35494Smrg if (--bo_gem->map_count == 0) 35362ee35494Smrg drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 35372ee35494Smrg } else { 35382ee35494Smrg VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1)); 35392ee35494Smrg bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr; 35402ee35494Smrg } 35412ee35494Smrg } 35422ee35494Smrg pthread_mutex_unlock(&bufmgr_gem->lock); 35432ee35494Smrg 35442ee35494Smrg return bo_gem->mem_virtual; 35452ee35494Smrg} 35462ee35494Smrg 35476260e5d5Smrgdrm_public void 
*drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
35482ee35494Smrg{
35492ee35494Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
35502ee35494Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
35512ee35494Smrg
35522ee35494Smrg	if (bo_gem->wc_virtual)
35532ee35494Smrg		return bo_gem->wc_virtual;
35542ee35494Smrg
35552ee35494Smrg	if (bo_gem->is_userptr)
35562ee35494Smrg		return NULL;
35572ee35494Smrg
35582ee35494Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
35592ee35494Smrg	if (!bo_gem->wc_virtual) {
35602ee35494Smrg		struct drm_i915_gem_mmap mmap_arg;
35612ee35494Smrg
35622ee35494Smrg		if (bo_gem->map_count++ == 0)
35632ee35494Smrg			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
35642ee35494Smrg
35652ee35494Smrg		DBG("bo_map: %d (%s), map_count=%d\n",
35662ee35494Smrg		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
35672ee35494Smrg
35682ee35494Smrg		memclear(mmap_arg);
35692ee35494Smrg		mmap_arg.handle = bo_gem->gem_handle;
35702ee35494Smrg		mmap_arg.size = bo->size;
35712ee35494Smrg		mmap_arg.flags = I915_MMAP_WC;
35722ee35494Smrg		if (drmIoctl(bufmgr_gem->fd,
35732ee35494Smrg			     DRM_IOCTL_I915_GEM_MMAP,
35742ee35494Smrg			     &mmap_arg)) {
35752ee35494Smrg			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
35762ee35494Smrg			    __FILE__, __LINE__, bo_gem->gem_handle,
35772ee35494Smrg			    bo_gem->name, strerror(errno));
35782ee35494Smrg			if (--bo_gem->map_count == 0)
35792ee35494Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
35802ee35494Smrg		} else {
35812ee35494Smrg			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
35822ee35494Smrg			bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
35832ee35494Smrg		}
35842ee35494Smrg	}
35852ee35494Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
35862ee35494Smrg
35872ee35494Smrg	return bo_gem->wc_virtual;
35882ee35494Smrg}
35892ee35494Smrg
359022944501Smrg/**
359122944501Smrg * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
359222944501Smrg * and manage buffer objects.
359322944501Smrg *
359422944501Smrg * \param fd File descriptor of the opened DRM device.
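 *
 * Typical setup, as an illustrative sketch only (the device path and
 * batch size here are assumptions, and error handling is elided):
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16384);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	...use drm_intel_bo_alloc()/drm_intel_bo_map() and friends...
 *	drm_intel_bufmgr_destroy(bufmgr);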
359522944501Smrg */
35966260e5d5Smrgdrm_public drm_intel_bufmgr *
359722944501Smrgdrm_intel_bufmgr_gem_init(int fd, int batch_size)
359822944501Smrg{
359922944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
360022944501Smrg	struct drm_i915_gem_get_aperture aperture;
360122944501Smrg	drm_i915_getparam_t gp;
360220131375Smrg	int ret, tmp;
360320131375Smrg	bool exec2 = false;
360422944501Smrg
3605a884aba1Smrg	pthread_mutex_lock(&bufmgr_list_mutex);
3606a884aba1Smrg
3607a884aba1Smrg	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3608a884aba1Smrg	if (bufmgr_gem)
3609a884aba1Smrg		goto exit;
3610a884aba1Smrg
361122944501Smrg	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
361222944501Smrg	if (bufmgr_gem == NULL)
3613a884aba1Smrg		goto exit;
361422944501Smrg
361522944501Smrg	bufmgr_gem->fd = fd;
3616a884aba1Smrg	atomic_set(&bufmgr_gem->refcount, 1);
361722944501Smrg
361822944501Smrg	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
361922944501Smrg		free(bufmgr_gem);
3620a884aba1Smrg		bufmgr_gem = NULL;
3621a884aba1Smrg		goto exit;
362222944501Smrg	}
362322944501Smrg
3624424e9256Smrg	memclear(aperture);
36256d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
36266d98c517Smrg		       DRM_IOCTL_I915_GEM_GET_APERTURE,
36276d98c517Smrg		       &aperture);
362822944501Smrg
362922944501Smrg	if (ret == 0)
363022944501Smrg		bufmgr_gem->gtt_size = aperture.aper_available_size;
363122944501Smrg	else {
363222944501Smrg		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
363322944501Smrg			strerror(errno));
363422944501Smrg		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
363522944501Smrg		fprintf(stderr, "Assuming %dkB available aperture size.\n"
363622944501Smrg			"May lead to reduced performance or incorrect "
363722944501Smrg			"rendering.\n",
363822944501Smrg			(int)bufmgr_gem->gtt_size / 1024);
363922944501Smrg	}
364022944501Smrg
364120131375Smrg	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
364222944501Smrg
364320131375Smrg	if (IS_GEN2(bufmgr_gem->pci_device))
364422944501Smrg		bufmgr_gem->gen = 2;
364520131375Smrg	else if (IS_GEN3(bufmgr_gem->pci_device))
364622944501Smrg		bufmgr_gem->gen = 3;
364720131375Smrg	else if (IS_GEN4(bufmgr_gem->pci_device))
364822944501Smrg		bufmgr_gem->gen = 4;
364920131375Smrg	else if (IS_GEN5(bufmgr_gem->pci_device))
365020131375Smrg		bufmgr_gem->gen = 5;
365120131375Smrg	else if (IS_GEN6(bufmgr_gem->pci_device))
365222944501Smrg		bufmgr_gem->gen = 6;
365320131375Smrg	else if (IS_GEN7(bufmgr_gem->pci_device))
365420131375Smrg		bufmgr_gem->gen = 7;
365520131375Smrg	else if (IS_GEN8(bufmgr_gem->pci_device))
365620131375Smrg		bufmgr_gem->gen = 8;
36576260e5d5Smrg	else if (!intel_get_genx(bufmgr_gem->pci_device, &bufmgr_gem->gen)) {
365820131375Smrg		free(bufmgr_gem);
3659a884aba1Smrg		bufmgr_gem = NULL;
3660a884aba1Smrg		goto exit;
366120131375Smrg	}
366220131375Smrg
366320131375Smrg	if (IS_GEN3(bufmgr_gem->pci_device) &&
366420131375Smrg	    bufmgr_gem->gtt_size > 256*1024*1024) {
366520131375Smrg		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
366620131375Smrg		 * be used for tiled blits. To simplify the accounting, just
3667fe517fc9Smrg		 * subtract the unmappable part (fixed to 256MB on all known
366820131375Smrg		 * gen3 devices) if the kernel advertises it.
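		 * (For example, a reported 1GiB aperture is accounted as
		 * 768MiB by the aperture checks above.)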
 */
366920131375Smrg		bufmgr_gem->gtt_size -= 256*1024*1024;
367020131375Smrg	}
367120131375Smrg
3672424e9256Smrg	memclear(gp);
367320131375Smrg	gp.value = &tmp;
367422944501Smrg
367522944501Smrg	gp.param = I915_PARAM_HAS_EXECBUF2;
36766d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
367722944501Smrg	if (!ret)
367820131375Smrg		exec2 = true;
367922944501Smrg
3680aaba2545Smrg	gp.param = I915_PARAM_HAS_BSD;
36816d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
36829ce4edccSmrg	bufmgr_gem->has_bsd = ret == 0;
36839ce4edccSmrg
36849ce4edccSmrg	gp.param = I915_PARAM_HAS_BLT;
36859ce4edccSmrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
36869ce4edccSmrg	bufmgr_gem->has_blt = ret == 0;
36879ce4edccSmrg
36889ce4edccSmrg	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
36899ce4edccSmrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
36909ce4edccSmrg	bufmgr_gem->has_relaxed_fencing = ret == 0;
3691aaba2545Smrg
36922ee35494Smrg	gp.param = I915_PARAM_HAS_EXEC_ASYNC;
36932ee35494Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
36942ee35494Smrg	bufmgr_gem->has_exec_async = ret == 0;
36952ee35494Smrg
3696424e9256Smrg	bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
3697a884aba1Smrg
369820131375Smrg	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
369920131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
370020131375Smrg	bufmgr_gem->has_wait_timeout = ret == 0;
370120131375Smrg
370220131375Smrg	gp.param = I915_PARAM_HAS_LLC;
370320131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
370420131375Smrg	if (ret != 0) {
370520131375Smrg		/* Kernel does not support the HAS_LLC query; fall back to GPU
370620131375Smrg		 * generation detection and assume that we have LLC on GEN6/7
370720131375Smrg		 */
370820131375Smrg		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
370920131375Smrg				       IS_GEN7(bufmgr_gem->pci_device));
371020131375Smrg	} else
371120131375Smrg		bufmgr_gem->has_llc = *gp.value;
371220131375Smrg
371320131375Smrg	gp.param = I915_PARAM_HAS_VEBOX;
371420131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
371520131375Smrg	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
371620131375Smrg
3717fe517fc9Smrg	gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
3718fe517fc9Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3719fe517fc9Smrg	if (ret == 0 && *gp.value > 0)
3720fe517fc9Smrg		bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
3721fe517fc9Smrg
372222944501Smrg	if (bufmgr_gem->gen < 4) {
372322944501Smrg		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
372422944501Smrg		gp.value = &bufmgr_gem->available_fences;
37256d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
372622944501Smrg		if (ret) {
372722944501Smrg			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
372822944501Smrg				errno);
372922944501Smrg			fprintf(stderr, "param: %d, val: %d\n", gp.param,
373022944501Smrg				*gp.value);
373122944501Smrg			bufmgr_gem->available_fences = 0;
373222944501Smrg		} else {
373322944501Smrg			/* XXX The kernel reports the total number of fences,
373422944501Smrg			 * including any that may be pinned.
373522944501Smrg			 *
373622944501Smrg			 * We presume that there will be at least one pinned
373722944501Smrg			 * fence for the scanout buffer, but there may be more
373822944501Smrg			 * than one scanout and the user may be manually
373922944501Smrg			 * pinning buffers.
Let's move to execbuffer2 and 374022944501Smrg * thereby forget the insanity of using fences... 374122944501Smrg */ 374222944501Smrg bufmgr_gem->available_fences -= 2; 374322944501Smrg if (bufmgr_gem->available_fences < 0) 374422944501Smrg bufmgr_gem->available_fences = 0; 374522944501Smrg } 374622944501Smrg } 374722944501Smrg 3748fe517fc9Smrg if (bufmgr_gem->gen >= 8) { 3749fe517fc9Smrg gp.param = I915_PARAM_HAS_ALIASING_PPGTT; 3750fe517fc9Smrg ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3751fe517fc9Smrg if (ret == 0 && *gp.value == 3) 3752fe517fc9Smrg bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range; 3753fe517fc9Smrg } 3754fe517fc9Smrg 375522944501Smrg /* Let's go with one relocation per every 2 dwords (but round down a bit 375622944501Smrg * since a power of two will mean an extra page allocation for the reloc 375722944501Smrg * buffer). 375822944501Smrg * 375922944501Smrg * Every 4 was too few for the blender benchmark. 376022944501Smrg */ 376122944501Smrg bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2; 376222944501Smrg 376322944501Smrg bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc; 376422944501Smrg bufmgr_gem->bufmgr.bo_alloc_for_render = 376522944501Smrg drm_intel_gem_bo_alloc_for_render; 376622944501Smrg bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled; 376722944501Smrg bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference; 376822944501Smrg bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference; 376922944501Smrg bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map; 377022944501Smrg bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap; 377122944501Smrg bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata; 377222944501Smrg bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata; 377322944501Smrg bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering; 377422944501Smrg bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc; 377522944501Smrg bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence; 377622944501Smrg bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin; 377722944501Smrg bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin; 377822944501Smrg bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling; 377922944501Smrg bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling; 378022944501Smrg bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink; 378122944501Smrg /* Use the new one if available */ 3782aaba2545Smrg if (exec2) { 378322944501Smrg bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2; 37849ce4edccSmrg bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2; 3785aaba2545Smrg } else 378622944501Smrg bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec; 378722944501Smrg bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy; 378822944501Smrg bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise; 3789a884aba1Smrg bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref; 379022944501Smrg bufmgr_gem->bufmgr.debug = 0; 379122944501Smrg bufmgr_gem->bufmgr.check_aperture_space = 379222944501Smrg drm_intel_gem_check_aperture_space; 379322944501Smrg bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse; 3794aaba2545Smrg bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable; 379522944501Smrg bufmgr_gem->bufmgr.get_pipe_from_crtc_id = 379622944501Smrg drm_intel_gem_get_pipe_from_crtc_id; 379722944501Smrg bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references; 379822944501Smrg 
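	/* Finish by setting up the reuse buckets and the VMA cache, then
	 * publish the new manager on the shared bufmgr list. */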
3799aaba2545Smrg init_cache_buckets(bufmgr_gem); 380022944501Smrg 380120131375Smrg DRMINITLISTHEAD(&bufmgr_gem->vma_cache); 380220131375Smrg bufmgr_gem->vma_max = -1; /* unlimited by default */ 380320131375Smrg 3804a884aba1Smrg DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list); 3805a884aba1Smrg 3806a884aba1Smrgexit: 3807a884aba1Smrg pthread_mutex_unlock(&bufmgr_list_mutex); 3808a884aba1Smrg 3809a884aba1Smrg return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL; 381022944501Smrg} 3811
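
/*
 * Illustrative sketch (not part of the library itself): sharing a bo via
 * prime and opting out of implicit synchronisation where the kernel
 * supports it. The buffer name and size are assumptions.
 *
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "shared", 4096, 4096);
 *	int prime_fd;
 *
 *	if (drm_intel_bo_gem_export_to_prime(bo, &prime_fd) == 0 &&
 *	    drm_intel_bufmgr_gem_can_disable_implicit_sync(bufmgr))
 *		drm_intel_gem_bo_disable_implicit_sync(bo);
 */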