/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *	    Eric Anholt <eric@anholt.net>
 *	    Dave Airlie <airlied@linux.ie>
 */

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>

#include "errno.h"
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "string.h"

#include "i915_drm.h"
#include "uthash.h"

#if HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define memclear(s) memset(&s, 0, sizeof(s))

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define MAX2(A, B) ((A) > (B) ? (A) : (B))

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((__u32)(n))
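
/*
 * Example: for a 64-bit presumed offset such as 0x123456780,
 * upper_32_bits() yields 0x1 and lower_32_bits() yields 0x23456780.
 * The double 16-bit shift keeps the macro warning-free even when it is
 * handed a 32-bit quantity.
 */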

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	atomic_t refcount;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	drmMMListHead managers;

	drm_intel_bo_gem *name_table;
	drm_intel_bo_gem *handle_table;

	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int has_llc : 1;
	unsigned int has_wait_timeout : 1;
	unsigned int bo_reuse : 1;
	unsigned int no_exec : 1;
	unsigned int has_vebox : 1;
	unsigned int has_exec_async : 1;
	bool fenced_relocs;

	struct {
		void *ptr;
		uint32_t handle;
	} userptr_active;

} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 *
	 * List contains both flink named and prime fd'd objects
	 */
	unsigned int global_name;

	UT_hash_handle handle_hh;
	UT_hash_handle name_hh;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	unsigned long kflags;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Array of BOs that are referenced by this buffer and will be softpinned */
	drm_intel_bo **softpin_target;
	/** Number of softpinned BOs referenced by this buffer */
	int softpin_target_count;
	/** Maximum number of softpinned BOs referenced by this buffer */
	int softpin_target_size;

	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	/** WC CPU address for the buffer, saved across map/unmap cycles */
	void *wc_virtual;
	/**
	 * Virtual address of the buffer allocated by user, used for userptr
	 * objects only.
	 */
	void *user_virtual;
	int map_count;
	drmMMListHead vma_list;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Boolean of whether the GPU is definitely not accessing the buffer.
	 *
	 * This is only valid when reusable, since non-reusable
	 * buffers are those that have been shared with other
	 * processes, so we don't know their state.
	 */
	bool idle;

	/**
	 * Boolean of whether this buffer was allocated with userptr
	 */
	bool is_userptr;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
{
	return (drm_intel_bo_gem *)bo;
}

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}
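
/*
 * Example: a 700KB Y-tiled object on gen3 without relaxed fencing is
 * rounded up to the 1MB minimum fence size, whereas with relaxed
 * fencing (or on gen4+) it only needs page alignment, i.e.
 * ROUND_UP_TO(700*1024, 4096).
 */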

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fall back to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}
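
/*
 * Example: an X-tiled pitch of 1100 bytes rounds up to 1536 on gen4+
 * (the next multiple of the 512-byte tile width), but to 2048 on
 * pre-965 parts, which need a power-of-two pitch.
 */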

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}
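
/*
 * The buckets are kept sorted by ascending size, so the loop above is
 * a first-fit search.  Example, assuming the default bucket layout of
 * 4096, 8192, 12288, 16384, ...: a 5000-byte request lands in the
 * 8192-byte bucket.
 */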

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
			DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d %s(%s)@0x%08x %08x -> "
			    "%d (%s)@0x%08x %08x + 0x%08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name,
			    upper_32_bits(bo_gem->relocs[j].offset),
			    lower_32_bits(bo_gem->relocs[j].offset),
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64),
			    bo_gem->relocs[j].delta);
		}

		for (j = 0; j < bo_gem->softpin_target_count; j++) {
			drm_intel_bo *target_bo = bo_gem->softpin_target[j];
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;
			DBG("%2d: %d %s(%s) -> "
			    "%d *(%s)@0x%08x %08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name,
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64));
		}
	}
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = bo->align;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;
	unsigned long flags;

	flags = 0;
	if (need_fence)
		flags |= EXEC_OBJECT_NEEDS_FENCE;

	if (bo_gem->validate_index != -1) {
		bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = bo->align;
	bufmgr_gem->exec2_objects[index].offset = bo->offset64;
	bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags | flags;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem,
				      unsigned int alignment)
{
	unsigned int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		unsigned int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		alignment = MAX2(alignment, min_size);
	}

	bo_gem->reloc_tree_size = size + alignment;
}
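
/*
 * Example: a 600KB X-tiled buffer on gen3 with relaxed fencing keeps
 * min_size at the 1MB minimum (already >= the object size), so its
 * aperture estimate becomes 600KB + 1MB; for an untiled buffer the
 * estimate is simply size + alignment.
 */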

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free (bo_gem->relocs);
		bo_gem->relocs = NULL;

		free (bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	if (bo_gem->reusable && bo_gem->idle)
		return false;

	memclear(busy);
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	if (ret == 0) {
		bo_gem->idle = !busy.busy;
		return busy.busy;
	} else {
		return false;
	}
}

static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	memclear(madv);
	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}
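
/*
 * Example (illustrative sketch): a caller can lend an idle buffer's
 * pages to the kernel and reclaim them before the next use:
 *
 *	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
 *	...
 *	if (!drm_intel_bo_madvise(bo, I915_MADV_WILLNEED))
 *		...the pages were purged and the contents are gone...
 */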

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride,
				unsigned int alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
			bo_gem->bo.align = alignment;
		} else {
			assert(alignment == 0);
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			goto err;

		/* drm_intel_gem_bo_free() calls DRMLISTDEL() on an
		 * uninitialized list (vma_list), so set the list head
		 * up front. */
		DRMINITLISTHEAD(&bo_gem->vma_list);

		bo_gem->bo.size = bo_size;

		memclear(create);
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		if (ret != 0) {
			free(bo_gem);
			goto err;
		}

		bo_gem->gem_handle = create.handle;
		HASH_ADD(handle_hh, bufmgr_gem->handle_table,
			 gem_handle, sizeof(bo_gem->gem_handle),
			 bo_gem);

		bo_gem->bo.handle = bo_gem->gem_handle;
		bo_gem->bo.bufmgr = bufmgr;
		bo_gem->bo.align = alignment;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride))
			goto err_free;
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;

err_free:
	drm_intel_gem_bo_free(&bo_gem->bo);
err:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0,
					       alignment);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault.  This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride, 0);
}
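
/*
 * Example usage (an illustrative sketch; the dimensions are made up):
 * allocate a 1024x768 32bpp X-tiled surface and check whether the
 * requested tiling survived the pitch/size negotiation above:
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo = drm_intel_bo_alloc_tiled(bufmgr, "surface",
 *						    1024, 768, 4,
 *						    &tiling, &pitch, 0);
 *	if (bo != NULL && tiling != I915_TILING_X)
 *		...the kernel demoted the tiling mode...
 */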

static drm_intel_bo *
drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
				const char *name,
				void *addr,
				uint32_t tiling_mode,
				uint32_t stride,
				unsigned long size,
				unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_i915_gem_userptr userptr;

	/* Tiling with userptr surfaces is not supported
	 * on all hardware, so refuse it for the time being.
	 */
	if (tiling_mode != I915_TILING_NONE)
		return NULL;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	bo_gem->bo.size = size;

	memclear(userptr);
	userptr.user_ptr = (__u64)((unsigned long)addr);
	userptr.user_size = size;
	userptr.flags = flags;

	ret = drmIoctl(bufmgr_gem->fd,
			DRM_IOCTL_I915_GEM_USERPTR,
			&userptr);
	if (ret != 0) {
		DBG("bo_create_userptr: "
		    "ioctl failed with user ptr %p size 0x%lx, "
		    "user flags 0x%lx\n", addr, size, flags);
		free(bo_gem);
		return NULL;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);

	bo_gem->gem_handle = userptr.handle;
	bo_gem->bo.handle = bo_gem->gem_handle;
	bo_gem->bo.bufmgr    = bufmgr;
	bo_gem->is_userptr   = true;
	bo_gem->bo.virtual   = addr;
	/* Save the address provided by user */
	bo_gem->user_virtual = addr;
	bo_gem->tiling_mode  = I915_TILING_NONE;
	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	bo_gem->stride       = 0;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle),
		 bo_gem);

	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create_userptr: "
	    "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
		addr, bo_gem->gem_handle, bo_gem->name,
		size, stride, tiling_mode);

	return &bo_gem->bo;
}
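
/*
 * Example usage (an illustrative sketch): wrap an existing page-aligned
 * allocation in a GEM object.  The memory must remain valid for the
 * lifetime of the returned bo:
 *
 *	void *ptr;
 *	if (posix_memalign(&ptr, 4096, 16 * 4096) == 0) {
 *		drm_intel_bo *bo =
 *			drm_intel_bo_alloc_userptr(bufmgr, "wrapped", ptr,
 *						   I915_TILING_NONE, 0,
 *						   16 * 4096, 0);
 *		...
 *	}
 */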

static bool
has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int ret;
	void *ptr;
	long pgsz;
	struct drm_i915_gem_userptr userptr;

	pgsz = sysconf(_SC_PAGESIZE);
	assert(pgsz > 0);

	ret = posix_memalign(&ptr, pgsz, pgsz);
	if (ret) {
		DBG("Failed to get a page (%ld) for userptr detection!\n",
			pgsz);
		return false;
	}

	memclear(userptr);
	userptr.user_ptr = (__u64)(unsigned long)ptr;
	userptr.user_size = pgsz;

retry:
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
	if (ret) {
		if (errno == ENODEV && userptr.flags == 0) {
			userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
			goto retry;
		}
		free(ptr);
		return false;
	}

	/* We don't release the userptr bo here, as we want to keep the
	 * kernel mm tracking alive for our lifetime. The first time we
	 * create a userptr object the kernel has to install an
	 * mmu_notifier, which is a heavyweight operation (e.g. it
	 * requires taking all mm_locks and stop_machine()).
	 */

	bufmgr_gem->userptr_active.ptr = ptr;
	bufmgr_gem->userptr_active.handle = userptr.handle;

	return true;
}

static drm_intel_bo *
check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       void *addr,
		       uint32_t tiling_mode,
		       uint32_t stride,
		       unsigned long size,
		       unsigned long flags)
{
	if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
		bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
	else
		bufmgr->bo_alloc_userptr = NULL;

	return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
					  tiling_mode, stride, size, flags);
}

static int get_tiling_mode(drm_intel_bufmgr_gem *bufmgr_gem,
			   uint32_t gem_handle,
			   uint32_t *tiling_mode,
			   uint32_t *swizzle_mode)
{
	struct drm_i915_gem_get_tiling get_tiling = {
		.handle = gem_handle,
	};
	int ret;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0 && errno != EOPNOTSUPP)
		return ret;

	*tiling_mode = get_tiling.tiling_mode;
	*swizzle_mode = get_tiling.swizzle_mode;

	return 0;
}

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;

	/* At the moment most applications only have a few named BOs.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named. And since X returns the
	 * alternating names for the front/back buffer, a linear search
	 * provides a sufficiently fast match.
	 */
	pthread_mutex_lock(&bufmgr_gem->lock);
	HASH_FIND(name_hh, bufmgr_gem->name_table,
		  &handle, sizeof(handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	memclear(open_arg);
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		bo_gem = NULL;
		goto out;
	}
	/* Now see if someone has used a prime handle to get this
	 * object from the kernel before by looking through the list
	 * again for a matching gem_handle.
	 */
	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
		  &open_arg.handle, sizeof(open_arg.handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		goto out;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.offset64 = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
	HASH_ADD(name_hh, bufmgr_gem->name_table,
		 global_name, sizeof(bo_gem->global_name), bo_gem);

	ret = get_tiling_mode(bufmgr_gem, bo_gem->gem_handle,
			      &bo_gem->tiling_mode, &bo_gem->swizzle_mode);
	if (ret != 0)
		goto err_unref;

	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

out:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return &bo_gem->bo;

err_unref:
	drm_intel_gem_bo_free(&bo_gem->bo);
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}
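
/*
 * Example (an illustrative sketch of cross-process sharing): the
 * exporting process publishes a global name with drm_intel_bo_flink(),
 * and the importing process wraps that name using the function above:
 *
 *	uint32_t name;
 *	if (drm_intel_bo_flink(bo, &name) == 0)
 *		...send name to the other process...
 *
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */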

static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->wc_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
		drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	if (bo_gem->global_name)
		HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
	HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);

	/* Close this object */
	memclear(close);
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

static void
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
{
#if HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

	if (bo_gem->wc_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);

	if (bo_gem->gtt_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}

static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bufmgr_gem->vma_cache.next,
				      vma_list);
		assert(bo_gem->map_count == 0);
		DRMLISTDELINIT(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
			drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->wc_virtual) {
			drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
			bo_gem->wc_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}
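
/*
 * Example: with vma_max == 512 and vma_open == 10, the cache above is
 * trimmed to 512 - 2*10 = 492 cached maps, keeping headroom below the
 * limit for the buffers that currently hold open mappings.
 */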

static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				       drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	for (i = 0; i < bo_gem->softpin_target_count; i++)
		drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
								  time);
	bo_gem->kflags = 0;
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->softpin_target_count = 0;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}
	if (bo_gem->softpin_target) {
		free(bo_gem->softpin_target);
		bo_gem->softpin_target = NULL;
		bo_gem->softpin_target_size = 0;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	}

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);

	if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);

		if (atomic_dec_and_test(&bo_gem->refcount)) {
			drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
			drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
		}

		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
144622944501Smrg{
144722944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
144822944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
144922944501Smrg	struct drm_i915_gem_set_domain set_domain;
145022944501Smrg	int ret;
145122944501Smrg
1452a884aba1Smrg	if (bo_gem->is_userptr) {
1453a884aba1Smrg		/* Return the same user ptr */
1454a884aba1Smrg		bo->virtual = bo_gem->user_virtual;
1455a884aba1Smrg		return 0;
1456a884aba1Smrg	}
1457a884aba1Smrg
145822944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
145922944501Smrg
146020131375Smrg	if (bo_gem->map_count++ == 0)
146120131375Smrg		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
146220131375Smrg
146322944501Smrg	if (!bo_gem->mem_virtual) {
146422944501Smrg		struct drm_i915_gem_mmap mmap_arg;
146522944501Smrg
146620131375Smrg		DBG("bo_map: %d (%s), map_count=%d\n",
146720131375Smrg		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
146822944501Smrg
1469424e9256Smrg		memclear(mmap_arg);
147022944501Smrg		mmap_arg.handle = bo_gem->gem_handle;
147122944501Smrg		mmap_arg.size = bo->size;
14726d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd,
14736d98c517Smrg			       DRM_IOCTL_I915_GEM_MMAP,
14746d98c517Smrg			       &mmap_arg);
147522944501Smrg		if (ret != 0) {
147622944501Smrg			ret = -errno;
14779ce4edccSmrg			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
14789ce4edccSmrg			    __FILE__, __LINE__, bo_gem->gem_handle,
14799ce4edccSmrg			    bo_gem->name, strerror(errno));
148020131375Smrg			if (--bo_gem->map_count == 0)
148120131375Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
148222944501Smrg			pthread_mutex_unlock(&bufmgr_gem->lock);
148322944501Smrg			return ret;
148422944501Smrg		}
148520131375Smrg		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
148622944501Smrg		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
148722944501Smrg	}
148822944501Smrg	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
148922944501Smrg	    bo_gem->mem_virtual);
149022944501Smrg	bo->virtual = bo_gem->mem_virtual;
149122944501Smrg
1492424e9256Smrg	memclear(set_domain);
149322944501Smrg	set_domain.handle = bo_gem->gem_handle;
149422944501Smrg	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
149522944501Smrg	if (write_enable)
149622944501Smrg		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
149722944501Smrg	else
149822944501Smrg		set_domain.write_domain = 0;
14996d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
15006d98c517Smrg		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
15016d98c517Smrg		       &set_domain);
150222944501Smrg	if (ret != 0) {
15039ce4edccSmrg		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
15049ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle,
15059ce4edccSmrg		    strerror(errno));
150622944501Smrg	}
150722944501Smrg
150820131375Smrg	if (write_enable)
150920131375Smrg		bo_gem->mapped_cpu_write = true;
151020131375Smrg
151120131375Smrg	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
151220131375Smrg	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
151322944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
151422944501Smrg
151522944501Smrg	return 0;
151622944501Smrg}
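
/*
 * A minimal usage sketch of the CPU mapping path above, assuming this
 * function is installed as the bufmgr's bo_map hook so that the public
 * drm_intel_bo_map() dispatches here ("bo" stands for any buffer obtained
 * from this bufmgr; error handling is trimmed):
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memset(bo->virtual, 0, bo->size);
 *		drm_intel_bo_unmap(bo);
 *	}
 *
 * Passing write_enable=1 moves the object to the CPU write domain before
 * the caller scribbles on bo->virtual.
 */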
151722944501Smrg
151820131375Smrgstatic int
151920131375Smrgmap_gtt(drm_intel_bo *bo)
152022944501Smrg{
152122944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
152222944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
152322944501Smrg	int ret;
152422944501Smrg
1525a884aba1Smrg	if (bo_gem->is_userptr)
1526a884aba1Smrg		return -EINVAL;
1527a884aba1Smrg
152820131375Smrg	if (bo_gem->map_count++ == 0)
152920131375Smrg		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
153022944501Smrg
153122944501Smrg	/* Get a mapping of the buffer if we haven't before. */
153222944501Smrg	if (bo_gem->gtt_virtual == NULL) {
153322944501Smrg		struct drm_i915_gem_mmap_gtt mmap_arg;
153422944501Smrg
153520131375Smrg		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
153620131375Smrg		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
153722944501Smrg
1538424e9256Smrg		memclear(mmap_arg);
153922944501Smrg		mmap_arg.handle = bo_gem->gem_handle;
154022944501Smrg
154122944501Smrg		/* Get the fake offset back... */
15426d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd,
15436d98c517Smrg			       DRM_IOCTL_I915_GEM_MMAP_GTT,
15446d98c517Smrg			       &mmap_arg);
154522944501Smrg		if (ret != 0) {
154622944501Smrg			ret = -errno;
15479ce4edccSmrg			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
15489ce4edccSmrg			    __FILE__, __LINE__,
15499ce4edccSmrg			    bo_gem->gem_handle, bo_gem->name,
15509ce4edccSmrg			    strerror(errno));
155120131375Smrg			if (--bo_gem->map_count == 0)
155220131375Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
155322944501Smrg			return ret;
155422944501Smrg		}
155522944501Smrg
155622944501Smrg		/* and mmap it */
1557fd815b59Smaya		bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1558fd815b59Smaya					       MAP_SHARED, bufmgr_gem->fd,
1559fd815b59Smaya					       mmap_arg.offset);
1560d6e8b34dStnn		if (bo_gem->gtt_virtual == MAP_FAILED) {
156122944501Smrg			bo_gem->gtt_virtual = NULL;
1562d6e8b34dStnn			ret = -errno;
15639ce4edccSmrg			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
15649ce4edccSmrg			    __FILE__, __LINE__,
15659ce4edccSmrg			    bo_gem->gem_handle, bo_gem->name,
15669ce4edccSmrg			    strerror(errno));
156720131375Smrg			if (--bo_gem->map_count == 0)
156820131375Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
156922944501Smrg			return ret;
157022944501Smrg		}
157122944501Smrg	}
157222944501Smrg
157322944501Smrg	bo->virtual = bo_gem->gtt_virtual;
157422944501Smrg
157522944501Smrg	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
157622944501Smrg	    bo_gem->gtt_virtual);
157722944501Smrg
157820131375Smrg	return 0;
157920131375Smrg}
158020131375Smrg
15816260e5d5Smrgdrm_public int
1582a884aba1Smrgdrm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
158320131375Smrg{
158420131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
158520131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
158620131375Smrg	struct drm_i915_gem_set_domain set_domain;
158720131375Smrg	int ret;
158820131375Smrg
158920131375Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
159020131375Smrg
159120131375Smrg	ret = map_gtt(bo);
159220131375Smrg	if (ret) {
159320131375Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
159420131375Smrg		return ret;
159520131375Smrg	}
159620131375Smrg
159720131375Smrg	/* Now move it to the GTT domain so that the GPU and CPU
159820131375Smrg	 * caches are flushed and the GPU isn't actively using the
159920131375Smrg	 * buffer.
160020131375Smrg	 *
160120131375Smrg	 * The pagefault handler does this domain change for us when
160220131375Smrg	 * it has unbound the BO from the GTT, but it's up to us to
160320131375Smrg	 * tell it when we're about to use things if we had done
160420131375Smrg	 * rendering and it still happens to be bound to the GTT.
160520131375Smrg	 */
1606424e9256Smrg	memclear(set_domain);
160722944501Smrg	set_domain.handle = bo_gem->gem_handle;
160822944501Smrg	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
160922944501Smrg	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
16106d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
16116d98c517Smrg		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
16126d98c517Smrg		       &set_domain);
161322944501Smrg	if (ret != 0) {
16149ce4edccSmrg		DBG("%s:%d: Error setting domain %d: %s\n",
16159ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle,
16169ce4edccSmrg		    strerror(errno));
161722944501Smrg	}
161822944501Smrg
161920131375Smrg	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
162020131375Smrg	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
162122944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
162222944501Smrg
16236d98c517Smrg	return 0;
162422944501Smrg}
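
/*
 * A usage sketch for the GTT mapping above ("bo", "pixels" and "size" are
 * assumed to be supplied by the caller): the GTT map gives a
 * write-combined view with hardware detiling, so the CPU can copy pixels
 * through the aperture and then drop the mapping:
 *
 *	if (drm_intel_gem_bo_map_gtt(bo) == 0) {
 *		memcpy(bo->virtual, pixels, size);
 *		drm_intel_gem_bo_unmap_gtt(bo);
 *	}
 */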
162522944501Smrg
162620131375Smrg/**
162720131375Smrg * Performs a mapping of the buffer object like the normal GTT
162820131375Smrg * mapping, but avoids waiting for the GPU to be done reading from or
162920131375Smrg * rendering to the buffer.
163020131375Smrg *
163120131375Smrg * This is used in the implementation of GL_ARB_map_buffer_range: The
163220131375Smrg * user asks to create a buffer, then does a mapping, fills some
163320131375Smrg * space, runs a drawing command, then asks to map it again without
163420131375Smrg * synchronizing because it guarantees that it won't write over the
163520131375Smrg * data that the GPU is busy using (or, more specifically, that if it
163620131375Smrg * does write over the data, it acknowledges that rendering is
163720131375Smrg * undefined).
163820131375Smrg */
163920131375Smrg
16406260e5d5Smrgdrm_public int
1641a884aba1Smrgdrm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
164222944501Smrg{
164322944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
16442b90624aSmrg#if HAVE_VALGRIND
164520131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
164620131375Smrg#endif
164720131375Smrg	int ret;
164822944501Smrg
164920131375Smrg	/* If the CPU cache isn't coherent with the GTT, then use a
165020131375Smrg	 * regular synchronized mapping.  The problem is that we don't
165120131375Smrg	 * track where the buffer was last used on the CPU side in
165220131375Smrg	 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
165320131375Smrg	 * we would potentially corrupt the buffer even when the user
165420131375Smrg	 * does reasonable things.
165520131375Smrg	 */
165620131375Smrg	if (!bufmgr_gem->has_llc)
165720131375Smrg		return drm_intel_gem_bo_map_gtt(bo);
165822944501Smrg
165922944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
166020131375Smrg
166120131375Smrg	ret = map_gtt(bo);
166220131375Smrg	if (ret == 0) {
166320131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
166420131375Smrg		VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
166520131375Smrg	}
166620131375Smrg
166722944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
166822944501Smrg
166922944501Smrg	return ret;
167022944501Smrg}
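
/*
 * A sketch of the map_buffer_range-style use described above, assuming
 * the caller tracks a region of the BO ("free_offset", "verts", "len")
 * that the GPU is guaranteed not to be reading:
 *
 *	if (drm_intel_gem_bo_map_unsynchronized(bo) == 0) {
 *		memcpy((char *)bo->virtual + free_offset, verts, len);
 *		drm_intel_gem_bo_unmap_gtt(bo);
 *	}
 *
 * Unsynchronized maps are GTT maps underneath, hence the GTT unmap.
 */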
167122944501Smrg
167222944501Smrgstatic int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
167322944501Smrg{
1674a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
167522944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
167620131375Smrg	int ret = 0;
167722944501Smrg
167822944501Smrg	if (bo == NULL)
167922944501Smrg		return 0;
168022944501Smrg
1681a884aba1Smrg	if (bo_gem->is_userptr)
1682a884aba1Smrg		return 0;
1683a884aba1Smrg
1684a884aba1Smrg	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1685a884aba1Smrg
168622944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
168722944501Smrg
168820131375Smrg	if (bo_gem->map_count <= 0) {
168920131375Smrg		DBG("attempted to unmap an unmapped bo\n");
169020131375Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
169120131375Smrg		/* Preserve the old behaviour of just treating this as a
169220131375Smrg		 * no-op rather than reporting the error.
169320131375Smrg		 */
169420131375Smrg		return 0;
169520131375Smrg	}
169620131375Smrg
169720131375Smrg	if (bo_gem->mapped_cpu_write) {
169820131375Smrg		struct drm_i915_gem_sw_finish sw_finish;
169920131375Smrg
170020131375Smrg		/* Cause a flush to happen if the buffer's pinned for
170120131375Smrg		 * scanout, so the results show up in a timely manner.
170220131375Smrg		 * Unlike GTT set-domain, this only does work if the
170320131375Smrg		 * buffer is scanout-related.
170420131375Smrg		 */
1705424e9256Smrg		memclear(sw_finish);
170620131375Smrg		sw_finish.handle = bo_gem->gem_handle;
170720131375Smrg		ret = drmIoctl(bufmgr_gem->fd,
170820131375Smrg			       DRM_IOCTL_I915_GEM_SW_FINISH,
170920131375Smrg			       &sw_finish);
171020131375Smrg		ret = ret == -1 ? -errno : 0;
171120131375Smrg
171220131375Smrg		bo_gem->mapped_cpu_write = false;
171320131375Smrg	}
171422944501Smrg
171520131375Smrg	/* We need to unmap after every invocation as we cannot track
17162ee35494Smrg	 * an open vma for every bo as that will exhaust the system
171720131375Smrg	 * limits and cause later failures.
171820131375Smrg	 */
171920131375Smrg	if (--bo_gem->map_count == 0) {
172020131375Smrg		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
172120131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
172220131375Smrg		bo->virtual = NULL;
172320131375Smrg	}
172422944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
172522944501Smrg
172622944501Smrg	return ret;
172722944501Smrg}
172822944501Smrg
17296260e5d5Smrgdrm_public int
1730a884aba1Smrgdrm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
173120131375Smrg{
173220131375Smrg	return drm_intel_gem_bo_unmap(bo);
173320131375Smrg}
173420131375Smrg
173522944501Smrgstatic int
173622944501Smrgdrm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
173722944501Smrg			 unsigned long size, const void *data)
173822944501Smrg{
173922944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
174022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
174122944501Smrg	struct drm_i915_gem_pwrite pwrite;
174222944501Smrg	int ret;
174322944501Smrg
1744a884aba1Smrg	if (bo_gem->is_userptr)
1745a884aba1Smrg		return -EINVAL;
1746a884aba1Smrg
1747424e9256Smrg	memclear(pwrite);
174822944501Smrg	pwrite.handle = bo_gem->gem_handle;
174922944501Smrg	pwrite.offset = offset;
175022944501Smrg	pwrite.size = size;
175122944501Smrg	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
17526d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
17536d98c517Smrg		       DRM_IOCTL_I915_GEM_PWRITE,
17546d98c517Smrg		       &pwrite);
175522944501Smrg	if (ret != 0) {
175622944501Smrg		ret = -errno;
17579ce4edccSmrg		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
17589ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
17599ce4edccSmrg		    (int)size, strerror(errno));
176022944501Smrg	}
176122944501Smrg
176222944501Smrg	return ret;
176322944501Smrg}
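
/*
 * A sketch of the pwrite path, assuming the public drm_intel_bo_subdata()
 * wrapper dispatches to this function: uploading a small block of data at
 * offset 0 without ever mapping the buffer.
 *
 *	float rgba[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
 *	drm_intel_bo_subdata(bo, 0, sizeof(rgba), rgba);
 */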
176422944501Smrg
176522944501Smrgstatic int
176622944501Smrgdrm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
176722944501Smrg{
176822944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
176922944501Smrg	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
177022944501Smrg	int ret;
177122944501Smrg
1772424e9256Smrg	memclear(get_pipe_from_crtc_id);
177322944501Smrg	get_pipe_from_crtc_id.crtc_id = crtc_id;
17746d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
17756d98c517Smrg		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
17766d98c517Smrg		       &get_pipe_from_crtc_id);
177722944501Smrg	if (ret != 0) {
177822944501Smrg		/* We return -1 here to signal that we don't
177922944501Smrg		 * know which pipe is associated with this crtc.
178022944501Smrg		 * This lets the caller know that this information
178122944501Smrg		 * isn't available; using the wrong pipe for
178222944501Smrg		 * vblank waiting can cause the chipset to lock up.
178322944501Smrg		 */
178422944501Smrg		return -1;
178522944501Smrg	}
178622944501Smrg
178722944501Smrg	return get_pipe_from_crtc_id.pipe;
178822944501Smrg}
178922944501Smrg
179022944501Smrgstatic int
179122944501Smrgdrm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
179222944501Smrg			     unsigned long size, void *data)
179322944501Smrg{
179422944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
179522944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
179622944501Smrg	struct drm_i915_gem_pread pread;
179722944501Smrg	int ret;
179822944501Smrg
1799a884aba1Smrg	if (bo_gem->is_userptr)
1800a884aba1Smrg		return -EINVAL;
1801a884aba1Smrg
1802424e9256Smrg	memclear(pread);
180322944501Smrg	pread.handle = bo_gem->gem_handle;
180422944501Smrg	pread.offset = offset;
180522944501Smrg	pread.size = size;
180622944501Smrg	pread.data_ptr = (uint64_t) (uintptr_t) data;
18076d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
18086d98c517Smrg		       DRM_IOCTL_I915_GEM_PREAD,
18096d98c517Smrg		       &pread);
181022944501Smrg	if (ret != 0) {
181122944501Smrg		ret = -errno;
18129ce4edccSmrg		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
18139ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
18149ce4edccSmrg		    (int)size, strerror(errno));
181522944501Smrg	}
181622944501Smrg
181722944501Smrg	return ret;
181822944501Smrg}
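
/*
 * The pread mirror of the sketch above, again assuming the public
 * drm_intel_bo_get_subdata() wrapper lands here: reading back a single
 * result without mapping.
 *
 *	uint64_t result;
 *	drm_intel_bo_get_subdata(bo, 0, sizeof(result), &result);
 */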
181922944501Smrg
18209ce4edccSmrg/** Waits for all GPU rendering with the object to have completed. */
182122944501Smrgstatic void
182222944501Smrgdrm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
182322944501Smrg{
18249ce4edccSmrg	drm_intel_gem_bo_start_gtt_access(bo, 1);
182522944501Smrg}
182622944501Smrg
182720131375Smrg/**
182820131375Smrg * Waits on a BO for the given amount of time.
182920131375Smrg *
183020131375Smrg * @bo: buffer object to wait for
183120131375Smrg * @timeout_ns: amount of time to wait in nanoseconds.
183220131375Smrg *   If value is less than 0, an infinite wait will occur.
183320131375Smrg *
183420131375Smrg * Returns 0 if the wait was successful, i.e. the last batch referencing the
183520131375Smrg * object has completed within the allotted time. Otherwise some negative return
183620131375Smrg * value describes the error. Of particular interest is -ETIME when the wait has
183720131375Smrg * failed to yield the desired result.
183820131375Smrg *
183920131375Smrg * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
184020131375Smrg * the operation to give up after a certain amount of time. Another subtle
184120131375Smrg * difference is the internal locking semantics are different (this variant does
184220131375Smrg * not hold the lock for the duration of the wait). This makes the wait subject
184320131375Smrg * to a larger userspace race window.
184420131375Smrg *
184520131375Smrg * The implementation shall wait until the object is no longer actively
184620131375Smrg * referenced within a batch buffer at the time of the call. The wait does
184720131375Smrg * not guarantee that the buffer is not re-issued via another thread or a
184820131375Smrg * flinked handle. Userspace must make sure this race does not occur if such
184920131375Smrg * precision is important.
1850424e9256Smrg *
1851424e9256Smrg * Note that some kernels have broken the infinite-wait-for-negative-values
1852424e9256Smrg * promise; upgrade to the latest stable kernel if this is the case.
185320131375Smrg */
18546260e5d5Smrgdrm_public int
1855a884aba1Smrgdrm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
185620131375Smrg{
185720131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
185820131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
185920131375Smrg	struct drm_i915_gem_wait wait;
186020131375Smrg	int ret;
186120131375Smrg
186220131375Smrg	if (!bufmgr_gem->has_wait_timeout) {
186320131375Smrg		DBG("%s:%d: Timed wait is not supported. Falling back to "
186420131375Smrg		    "infinite wait\n", __FILE__, __LINE__);
186520131375Smrg		if (timeout_ns) {
186620131375Smrg			drm_intel_gem_bo_wait_rendering(bo);
186720131375Smrg			return 0;
186820131375Smrg		} else {
186920131375Smrg			return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
187020131375Smrg		}
187120131375Smrg	}
187220131375Smrg
1873424e9256Smrg	memclear(wait);
187420131375Smrg	wait.bo_handle = bo_gem->gem_handle;
187520131375Smrg	wait.timeout_ns = timeout_ns;
187620131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
187720131375Smrg	if (ret == -1)
187820131375Smrg		return -errno;
187920131375Smrg
188020131375Smrg	return ret;
188120131375Smrg}
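
/*
 * A usage sketch for the timed wait, with a hypothetical skip_frame()
 * standing in for the caller's timeout policy: bound the stall on a busy
 * BO to 2ms and drop the frame instead of blocking indefinitely.
 *
 *	int ret = drm_intel_gem_bo_wait(bo, 2000000);
 *	if (ret == -ETIME)
 *		skip_frame();
 */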
188220131375Smrg
188322944501Smrg/**
188422944501Smrg * Sets the object to the GTT read and possibly write domain, used by the X
188522944501Smrg * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
188622944501Smrg *
188722944501Smrg * In combination with drm_intel_gem_bo_pin() and manual fence management, we
188822944501Smrg * can do tiled pixmaps this way.
188922944501Smrg */
18906260e5d5Smrgdrm_public void
189122944501Smrgdrm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
189222944501Smrg{
189322944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
189422944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
189522944501Smrg	struct drm_i915_gem_set_domain set_domain;
189622944501Smrg	int ret;
189722944501Smrg
1898424e9256Smrg	memclear(set_domain);
189922944501Smrg	set_domain.handle = bo_gem->gem_handle;
190022944501Smrg	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
190122944501Smrg	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
19026d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
19036d98c517Smrg		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
19046d98c517Smrg		       &set_domain);
190522944501Smrg	if (ret != 0) {
19069ce4edccSmrg		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
19079ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle,
19089ce4edccSmrg		    set_domain.read_domains, set_domain.write_domain,
19099ce4edccSmrg		    strerror(errno));
191022944501Smrg	}
191122944501Smrg}
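
/*
 * A sketch of the 2D-driver pattern described above, with "front_bo"
 * standing in for a pinned scanout buffer that already has a long-lived
 * GTT mapping: flag the write before any software fallback touches it.
 *
 *	drm_intel_gem_bo_start_gtt_access(front_bo, 1);
 *	... CPU writes through the existing GTT mapping ...
 */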
191222944501Smrg
191322944501Smrgstatic void
191422944501Smrgdrm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
191522944501Smrg{
191622944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1917424e9256Smrg	struct drm_gem_close close_bo;
1918424e9256Smrg	int i, ret;
191922944501Smrg
192022944501Smrg	free(bufmgr_gem->exec2_objects);
192122944501Smrg	free(bufmgr_gem->exec_objects);
192222944501Smrg	free(bufmgr_gem->exec_bos);
192322944501Smrg
192422944501Smrg	pthread_mutex_destroy(&bufmgr_gem->lock);
192522944501Smrg
192622944501Smrg	/* Free any cached buffer objects we were going to reuse */
1927aaba2545Smrg	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
192822944501Smrg		struct drm_intel_gem_bo_bucket *bucket =
192922944501Smrg		    &bufmgr_gem->cache_bucket[i];
193022944501Smrg		drm_intel_bo_gem *bo_gem;
193122944501Smrg
193222944501Smrg		while (!DRMLISTEMPTY(&bucket->head)) {
193322944501Smrg			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
193422944501Smrg					      bucket->head.next, head);
193522944501Smrg			DRMLISTDEL(&bo_gem->head);
193622944501Smrg
193722944501Smrg			drm_intel_gem_bo_free(&bo_gem->bo);
193822944501Smrg		}
193922944501Smrg	}
194022944501Smrg
1941424e9256Smrg	/* Release userptr bo kept hanging around for optimisation. */
1942424e9256Smrg	if (bufmgr_gem->userptr_active.ptr) {
1943424e9256Smrg		memclear(close_bo);
1944424e9256Smrg		close_bo.handle = bufmgr_gem->userptr_active.handle;
1945424e9256Smrg		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1946424e9256Smrg		free(bufmgr_gem->userptr_active.ptr);
1947424e9256Smrg		if (ret)
1948424e9256Smrg			fprintf(stderr,
1949424e9256Smrg				"Failed to release test userptr object! (%d) "
1950424e9256Smrg				"i915 kernel driver may not be sane!\n", errno);
1951424e9256Smrg	}
1952424e9256Smrg
195322944501Smrg	free(bufmgr);
195422944501Smrg}
195522944501Smrg
195622944501Smrg/**
195722944501Smrg * Adds the target buffer to the validation list and adds the relocation
195822944501Smrg * to the reloc_buffer's relocation list.
195922944501Smrg *
196022944501Smrg * The relocation entry at the given offset must already contain the
196122944501Smrg * precomputed relocation value, because the kernel will optimize out
196222944501Smrg * the relocation entry write when the buffer hasn't moved from the
196322944501Smrg * last known offset in target_bo.
196422944501Smrg */
196522944501Smrgstatic int
196622944501Smrgdo_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
196722944501Smrg		 drm_intel_bo *target_bo, uint32_t target_offset,
196822944501Smrg		 uint32_t read_domains, uint32_t write_domain,
196920131375Smrg		 bool need_fence)
197022944501Smrg{
197122944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
197222944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
197322944501Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
197420131375Smrg	bool fenced_command;
197522944501Smrg
197622944501Smrg	if (bo_gem->has_error)
197722944501Smrg		return -ENOMEM;
197822944501Smrg
197922944501Smrg	if (target_bo_gem->has_error) {
198020131375Smrg		bo_gem->has_error = true;
198122944501Smrg		return -ENOMEM;
198222944501Smrg	}
198322944501Smrg
198422944501Smrg	/* We never use HW fences for rendering on 965+ */
198522944501Smrg	if (bufmgr_gem->gen >= 4)
198620131375Smrg		need_fence = false;
198722944501Smrg
19889ce4edccSmrg	fenced_command = need_fence;
19899ce4edccSmrg	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
199020131375Smrg		need_fence = false;
19919ce4edccSmrg
199222944501Smrg	/* Create a new relocation list if needed */
199322944501Smrg	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
199422944501Smrg		return -ENOMEM;
199522944501Smrg
199622944501Smrg	/* Check overflow */
199722944501Smrg	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
199822944501Smrg
199922944501Smrg	/* Check args */
200022944501Smrg	assert(offset <= bo->size - 4);
200122944501Smrg	assert((write_domain & (write_domain - 1)) == 0);
200222944501Smrg
20033c748557Ssnj	/* An object needing a fence is a tiled buffer, so it won't have
20043c748557Ssnj	 * relocs to other buffers.
20053c748557Ssnj	 */
20063c748557Ssnj	if (need_fence) {
20073c748557Ssnj		assert(target_bo_gem->reloc_count == 0);
20083c748557Ssnj		target_bo_gem->reloc_tree_fences = 1;
20093c748557Ssnj	}
20103c748557Ssnj
201122944501Smrg	/* Make sure that we're not adding a reloc to something whose size has
201222944501Smrg	 * already been accounted for.
201322944501Smrg	 */
201422944501Smrg	assert(!bo_gem->used_as_reloc_target);
2015aaba2545Smrg	if (target_bo_gem != bo_gem) {
201620131375Smrg		target_bo_gem->used_as_reloc_target = true;
2017aaba2545Smrg		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
20183c748557Ssnj		bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
2019aaba2545Smrg	}
202022944501Smrg
202122944501Smrg	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
2022aaba2545Smrg	if (target_bo != bo)
2023aaba2545Smrg		drm_intel_gem_bo_reference(target_bo);
20249ce4edccSmrg	if (fenced_command)
202522944501Smrg		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
202622944501Smrg			DRM_INTEL_RELOC_FENCE;
202722944501Smrg	else
202822944501Smrg		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
202922944501Smrg
2030fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
2031fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
2032fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].target_handle =
2033fe517fc9Smrg	    target_bo_gem->gem_handle;
2034fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
2035fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
2036fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
203722944501Smrg	bo_gem->reloc_count++;
203822944501Smrg
203922944501Smrg	return 0;
204022944501Smrg}
204122944501Smrg
2042fe517fc9Smrgstatic void
2043fe517fc9Smrgdrm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
2044fe517fc9Smrg{
2045fe517fc9Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
20460655efefSmrg
20470655efefSmrg	if (enable)
20480655efefSmrg		bo_gem->kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
20490655efefSmrg	else
20500655efefSmrg		bo_gem->kflags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
2051fe517fc9Smrg}
2052fe517fc9Smrg
2053fe517fc9Smrgstatic int
2054fe517fc9Smrgdrm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
2055fe517fc9Smrg{
2056fe517fc9Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2057fe517fc9Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2058fe517fc9Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2059fe517fc9Smrg	if (bo_gem->has_error)
2060fe517fc9Smrg		return -ENOMEM;
2061fe517fc9Smrg
2062fe517fc9Smrg	if (target_bo_gem->has_error) {
2063fe517fc9Smrg		bo_gem->has_error = true;
2064fe517fc9Smrg		return -ENOMEM;
2065fe517fc9Smrg	}
2066fe517fc9Smrg
20670655efefSmrg	if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
2068fe517fc9Smrg		return -EINVAL;
2069fe517fc9Smrg	if (target_bo_gem == bo_gem)
2070fe517fc9Smrg		return -EINVAL;
2071fe517fc9Smrg
2072fe517fc9Smrg	if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
2073fe517fc9Smrg		int new_size = bo_gem->softpin_target_size * 2;
2073fe517fc9Smrg		drm_intel_bo **new_targets;
2074fe517fc9Smrg		if (new_size == 0)
2075fe517fc9Smrg			new_size = bufmgr_gem->max_relocs;
2076fe517fc9Smrg
2077fe517fc9Smrg		/* Use a temporary so the old array is neither lost nor
2077fe517fc9Smrg		 * leaked if realloc() fails.
2077fe517fc9Smrg		 */
2077fe517fc9Smrg		new_targets = realloc(bo_gem->softpin_target, new_size *
2078fe517fc9Smrg				sizeof(drm_intel_bo *));
2079fe517fc9Smrg		if (!new_targets)
2080fe517fc9Smrg			return -ENOMEM;
2080fe517fc9Smrg		bo_gem->softpin_target = new_targets;
2081fe517fc9Smrg
2082fe517fc9Smrg		bo_gem->softpin_target_size = new_size;
2083fe517fc9Smrg	}
2084fe517fc9Smrg	bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
2085fe517fc9Smrg	drm_intel_gem_bo_reference(target_bo);
2086fe517fc9Smrg	bo_gem->softpin_target_count++;
2087fe517fc9Smrg
2088fe517fc9Smrg	return 0;
2089fe517fc9Smrg}
2090fe517fc9Smrg
209122944501Smrgstatic int
209222944501Smrgdrm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
209322944501Smrg			    drm_intel_bo *target_bo, uint32_t target_offset,
209422944501Smrg			    uint32_t read_domains, uint32_t write_domain)
209522944501Smrg{
209622944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2097fe517fc9Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
209822944501Smrg
20990655efefSmrg	if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
2100fe517fc9Smrg		return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
2101fe517fc9Smrg	else
2102fe517fc9Smrg		return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2103fe517fc9Smrg					read_domains, write_domain,
2104fe517fc9Smrg					!bufmgr_gem->fenced_relocs);
210522944501Smrg}
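
/*
 * A sketch of how a batchbuffer writer pairs this hook (via the public
 * drm_intel_bo_emit_reloc()) with the batch contents ("batch_bo" and
 * "cur_offset" are the caller's state): the presumed address is written
 * into the batch so the kernel only patches it if target_bo has moved.
 *
 *	drm_intel_bo_emit_reloc(batch_bo, cur_offset, target_bo, 0,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 *	*(uint32_t *)((char *)batch_bo->virtual + cur_offset) =
 *		target_bo->offset64;
 */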
210622944501Smrg
210722944501Smrgstatic int
210822944501Smrgdrm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
210922944501Smrg				  drm_intel_bo *target_bo,
211022944501Smrg				  uint32_t target_offset,
211122944501Smrg				  uint32_t read_domains, uint32_t write_domain)
211222944501Smrg{
211322944501Smrg	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
211420131375Smrg				read_domains, write_domain, true);
211520131375Smrg}
211620131375Smrg
21176260e5d5Smrgdrm_public int
211820131375Smrgdrm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
211920131375Smrg{
212020131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
212120131375Smrg
212220131375Smrg	return bo_gem->reloc_count;
212320131375Smrg}
212420131375Smrg
212520131375Smrg/**
212620131375Smrg * Removes existing relocation entries in the BO after "start".
212720131375Smrg *
212820131375Smrg * This allows a user to avoid a two-step process for state setup with
212920131375Smrg * counting up all the buffer objects and doing a
213020131375Smrg * drm_intel_bufmgr_check_aperture_space() before emitting any of the
213120131375Smrg * relocations for the state setup.  Instead, save the state of the
213220131375Smrg * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
213320131375Smrg * state, and then check if it still fits in the aperture.
213420131375Smrg *
213520131375Smrg * Any further drm_intel_bufmgr_check_aperture_space() queries
213620131375Smrg * involving this buffer in the tree are undefined after this call.
2137fe517fc9Smrg *
2138fe517fc9Smrg * This also removes all softpinned targets being referenced by the BO.
213920131375Smrg */
21406260e5d5Smrgdrm_public void
214120131375Smrgdrm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
214220131375Smrg{
2143a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
214420131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
214520131375Smrg	int i;
214620131375Smrg	struct timespec time;
214720131375Smrg
214820131375Smrg	clock_gettime(CLOCK_MONOTONIC, &time);
214920131375Smrg
215020131375Smrg	assert(bo_gem->reloc_count >= start);
2151a884aba1Smrg
215220131375Smrg	/* Unreference the cleared target buffers */
2153a884aba1Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
2154a884aba1Smrg
215520131375Smrg	for (i = start; i < bo_gem->reloc_count; i++) {
215620131375Smrg		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
215720131375Smrg		if (&target_bo_gem->bo != bo) {
215820131375Smrg			bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
215920131375Smrg			drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
216020131375Smrg								  time.tv_sec);
216120131375Smrg		}
216220131375Smrg	}
216320131375Smrg	bo_gem->reloc_count = start;
2164a884aba1Smrg
2165fe517fc9Smrg	for (i = 0; i < bo_gem->softpin_target_count; i++) {
2166fe517fc9Smrg		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
2167fe517fc9Smrg		drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
2168fe517fc9Smrg	}
2169fe517fc9Smrg	bo_gem->softpin_target_count = 0;
2170fe517fc9Smrg
2171a884aba1Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
2172a884aba1Smrg
217322944501Smrg}
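
/*
 * A sketch of the single-pass pattern this function enables, with
 * hypothetical emit_state() and flush_batch() helpers standing in for the
 * caller's state emission and batch submission:
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *	emit_state(batch_bo);
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0) {
 *		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
 *		flush_batch();
 *	}
 */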
217422944501Smrg
217522944501Smrg/**
217622944501Smrg * Walk the tree of relocations rooted at BO and accumulate the list of
217722944501Smrg * validations to be performed and update the relocation buffers with
217822944501Smrg * index values into the validation list.
217922944501Smrg */
218022944501Smrgstatic void
218122944501Smrgdrm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
218222944501Smrg{
218322944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
218422944501Smrg	int i;
218522944501Smrg
218622944501Smrg	if (bo_gem->relocs == NULL)
218722944501Smrg		return;
218822944501Smrg
218922944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++) {
219022944501Smrg		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
219122944501Smrg
2192aaba2545Smrg		if (target_bo == bo)
2193aaba2545Smrg			continue;
2194aaba2545Smrg
219520131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
219620131375Smrg
219722944501Smrg		/* Continue walking the tree depth-first. */
219822944501Smrg		drm_intel_gem_bo_process_reloc(target_bo);
219922944501Smrg
220022944501Smrg		/* Add the target to the validate list */
220122944501Smrg		drm_intel_add_validate_buffer(target_bo);
220222944501Smrg	}
220322944501Smrg}
220422944501Smrg
220522944501Smrgstatic void
220622944501Smrgdrm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
220722944501Smrg{
220822944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
220922944501Smrg	int i;
221022944501Smrg
2211fe517fc9Smrg	if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
221222944501Smrg		return;
221322944501Smrg
221422944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++) {
221522944501Smrg		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
221622944501Smrg		int need_fence;
221722944501Smrg
2218aaba2545Smrg		if (target_bo == bo)
2219aaba2545Smrg			continue;
2220aaba2545Smrg
222120131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
222220131375Smrg
222322944501Smrg		/* Continue walking the tree depth-first. */
222422944501Smrg		drm_intel_gem_bo_process_reloc2(target_bo);
222522944501Smrg
222622944501Smrg		need_fence = (bo_gem->reloc_target_info[i].flags &
222722944501Smrg			      DRM_INTEL_RELOC_FENCE);
222822944501Smrg
222922944501Smrg		/* Add the target to the validate list */
223022944501Smrg		drm_intel_add_validate_buffer2(target_bo, need_fence);
223122944501Smrg	}
2232fe517fc9Smrg
2233fe517fc9Smrg	for (i = 0; i < bo_gem->softpin_target_count; i++) {
2234fe517fc9Smrg		drm_intel_bo *target_bo = bo_gem->softpin_target[i];
2235fe517fc9Smrg
2236fe517fc9Smrg		if (target_bo == bo)
2237fe517fc9Smrg			continue;
2238fe517fc9Smrg
2239fe517fc9Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2240fe517fc9Smrg		drm_intel_gem_bo_process_reloc2(target_bo);
2241fe517fc9Smrg		drm_intel_add_validate_buffer2(target_bo, false);
2242fe517fc9Smrg	}
224322944501Smrg}
224422944501Smrg
224522944501Smrg
224622944501Smrgstatic void
224722944501Smrgdrm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
224822944501Smrg{
224922944501Smrg	int i;
225022944501Smrg
225122944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
225222944501Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
225322944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
225422944501Smrg
225522944501Smrg		/* Update the buffer offset */
225620131375Smrg		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
2257fe517fc9Smrg			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2258d82d45b3Sjoerg			    bo_gem->gem_handle, bo_gem->name,
2259fe517fc9Smrg			    upper_32_bits(bo->offset64),
2260fe517fc9Smrg			    lower_32_bits(bo->offset64),
2261fe517fc9Smrg			    upper_32_bits(bufmgr_gem->exec_objects[i].offset),
2262fe517fc9Smrg			    lower_32_bits(bufmgr_gem->exec_objects[i].offset));
226320131375Smrg			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
226422944501Smrg			bo->offset = bufmgr_gem->exec_objects[i].offset;
226522944501Smrg		}
226622944501Smrg	}
226722944501Smrg}
226822944501Smrg
226922944501Smrgstatic void
227022944501Smrgdrm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
227122944501Smrg{
227222944501Smrg	int i;
227322944501Smrg
227422944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
227522944501Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
227622944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
227722944501Smrg
227822944501Smrg		/* Update the buffer offset */
227920131375Smrg		if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2280fe517fc9Smrg			/* If we see a softpinned object here, it means that the kernel
2281fe517fc9Smrg			 * has relocated our object, indicating a programming error.
2282fe517fc9Smrg			 */
22830655efefSmrg			assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
2284fe517fc9Smrg			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2285d82d45b3Sjoerg			    bo_gem->gem_handle, bo_gem->name,
2286fe517fc9Smrg			    upper_32_bits(bo->offset64),
2287fe517fc9Smrg			    lower_32_bits(bo->offset64),
2288fe517fc9Smrg			    upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
2289fe517fc9Smrg			    lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
229020131375Smrg			bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
229122944501Smrg			bo->offset = bufmgr_gem->exec2_objects[i].offset;
229222944501Smrg		}
229322944501Smrg	}
229422944501Smrg}
229522944501Smrg
22966260e5d5Smrgdrm_public void
229720131375Smrgdrm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
229820131375Smrg			      int x1, int y1, int width, int height,
229920131375Smrg			      enum aub_dump_bmp_format format,
230020131375Smrg			      int pitch, int offset)
230120131375Smrg{
230220131375Smrg}
230320131375Smrg
230420131375Smrgstatic int
230520131375Smrgdrm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
230620131375Smrg		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
230720131375Smrg{
230820131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
230920131375Smrg	struct drm_i915_gem_execbuffer execbuf;
231020131375Smrg	int ret, i;
231120131375Smrg
2312fe517fc9Smrg	if (to_bo_gem(bo)->has_error)
231320131375Smrg		return -ENOMEM;
231420131375Smrg
231520131375Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
231620131375Smrg	/* Update indices and set up the validate list. */
231720131375Smrg	drm_intel_gem_bo_process_reloc(bo);
231820131375Smrg
231920131375Smrg	/* Add the batch buffer to the validation list.  There are no
232020131375Smrg	 * relocations pointing to it.
232120131375Smrg	 */
232220131375Smrg	drm_intel_add_validate_buffer(bo);
232320131375Smrg
2324424e9256Smrg	memclear(execbuf);
232520131375Smrg	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
232620131375Smrg	execbuf.buffer_count = bufmgr_gem->exec_count;
232720131375Smrg	execbuf.batch_start_offset = 0;
232820131375Smrg	execbuf.batch_len = used;
232920131375Smrg	execbuf.cliprects_ptr = (uintptr_t) cliprects;
233020131375Smrg	execbuf.num_cliprects = num_cliprects;
233120131375Smrg	execbuf.DR1 = 0;
233220131375Smrg	execbuf.DR4 = DR4;
233320131375Smrg
233420131375Smrg	ret = drmIoctl(bufmgr_gem->fd,
233520131375Smrg		       DRM_IOCTL_I915_GEM_EXECBUFFER,
233620131375Smrg		       &execbuf);
233720131375Smrg	if (ret != 0) {
233820131375Smrg		ret = -errno;
233920131375Smrg		if (errno == ENOSPC) {
234020131375Smrg			DBG("Execbuffer fails to pin. "
234120131375Smrg			    "Estimate: %u. Actual: %u. Available: %u\n",
234220131375Smrg			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
234320131375Smrg							       bufmgr_gem->
234420131375Smrg							       exec_count),
234520131375Smrg			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
234620131375Smrg							      bufmgr_gem->
234720131375Smrg							      exec_count),
234820131375Smrg			    (unsigned int)bufmgr_gem->gtt_size);
234920131375Smrg		}
235020131375Smrg	}
235120131375Smrg	drm_intel_update_buffer_offsets(bufmgr_gem);
235220131375Smrg
235320131375Smrg	if (bufmgr_gem->bufmgr.debug)
235420131375Smrg		drm_intel_gem_dump_validation_list(bufmgr_gem);
235520131375Smrg
235620131375Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
2357fe517fc9Smrg		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
235820131375Smrg
235920131375Smrg		bo_gem->idle = false;
236020131375Smrg
236120131375Smrg		/* Disconnect the buffer from the validate list */
236220131375Smrg		bo_gem->validate_index = -1;
236320131375Smrg		bufmgr_gem->exec_bos[i] = NULL;
236420131375Smrg	}
236520131375Smrg	bufmgr_gem->exec_count = 0;
236620131375Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
236720131375Smrg
236820131375Smrg	return ret;
236920131375Smrg}
237020131375Smrg
237120131375Smrgstatic int
237220131375Smrgdo_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
237320131375Smrg	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
23742ee35494Smrg	 int in_fence, int *out_fence,
237520131375Smrg	 unsigned int flags)
237620131375Smrg{
237720131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
237820131375Smrg	struct drm_i915_gem_execbuffer2 execbuf;
237920131375Smrg	int ret = 0;
238020131375Smrg	int i;
238120131375Smrg
2382fe517fc9Smrg	if (to_bo_gem(bo)->has_error)
2383fe517fc9Smrg		return -ENOMEM;
2384fe517fc9Smrg
238520131375Smrg	switch (flags & 0x7) {
238620131375Smrg	default:
238720131375Smrg		return -EINVAL;
238820131375Smrg	case I915_EXEC_BLT:
23899ce4edccSmrg		if (!bufmgr_gem->has_blt)
23909ce4edccSmrg			return -EINVAL;
23919ce4edccSmrg		break;
23929ce4edccSmrg	case I915_EXEC_BSD:
23939ce4edccSmrg		if (!bufmgr_gem->has_bsd)
23949ce4edccSmrg			return -EINVAL;
23959ce4edccSmrg		break;
239620131375Smrg	case I915_EXEC_VEBOX:
239720131375Smrg		if (!bufmgr_gem->has_vebox)
239820131375Smrg			return -EINVAL;
239920131375Smrg		break;
24009ce4edccSmrg	case I915_EXEC_RENDER:
24019ce4edccSmrg	case I915_EXEC_DEFAULT:
24029ce4edccSmrg		break;
24039ce4edccSmrg	}
2404aaba2545Smrg
240522944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
240622944501Smrg	/* Update indices and set up the validate list. */
240722944501Smrg	drm_intel_gem_bo_process_reloc2(bo);
240822944501Smrg
240922944501Smrg	/* Add the batch buffer to the validation list.  There are no relocations
241022944501Smrg	 * pointing to it.
241122944501Smrg	 */
241222944501Smrg	drm_intel_add_validate_buffer2(bo, 0);
241322944501Smrg
2414424e9256Smrg	memclear(execbuf);
241522944501Smrg	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
241622944501Smrg	execbuf.buffer_count = bufmgr_gem->exec_count;
241722944501Smrg	execbuf.batch_start_offset = 0;
241822944501Smrg	execbuf.batch_len = used;
241922944501Smrg	execbuf.cliprects_ptr = (uintptr_t)cliprects;
242022944501Smrg	execbuf.num_cliprects = num_cliprects;
242122944501Smrg	execbuf.DR1 = 0;
242222944501Smrg	execbuf.DR4 = DR4;
242320131375Smrg	execbuf.flags = flags;
242420131375Smrg	if (ctx == NULL)
242520131375Smrg		i915_execbuffer2_set_context_id(execbuf, 0);
242620131375Smrg	else
242720131375Smrg		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
242822944501Smrg	execbuf.rsvd2 = 0;
24292ee35494Smrg	if (in_fence != -1) {
24302ee35494Smrg		execbuf.rsvd2 = in_fence;
24312ee35494Smrg		execbuf.flags |= I915_EXEC_FENCE_IN;
24322ee35494Smrg	}
24332ee35494Smrg	if (out_fence != NULL) {
24342ee35494Smrg		*out_fence = -1;
24352ee35494Smrg		execbuf.flags |= I915_EXEC_FENCE_OUT;
24362ee35494Smrg	}
243722944501Smrg
243820131375Smrg	if (bufmgr_gem->no_exec)
243920131375Smrg		goto skip_execution;
244020131375Smrg
24416d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
24422ee35494Smrg		       DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
24436d98c517Smrg		       &execbuf);
244422944501Smrg	if (ret != 0) {
244522944501Smrg		ret = -errno;
24466d98c517Smrg		if (ret == -ENOSPC) {
24479ce4edccSmrg			DBG("Execbuffer fails to pin. "
24489ce4edccSmrg			    "Estimate: %u. Actual: %u. Available: %u\n",
24499ce4edccSmrg			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
24509ce4edccSmrg							       bufmgr_gem->exec_count),
24519ce4edccSmrg			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
24529ce4edccSmrg							      bufmgr_gem->exec_count),
24539ce4edccSmrg			    (unsigned int) bufmgr_gem->gtt_size);
245422944501Smrg		}
245522944501Smrg	}
245622944501Smrg	drm_intel_update_buffer_offsets2(bufmgr_gem);
245722944501Smrg
24582ee35494Smrg	if (ret == 0 && out_fence != NULL)
24592ee35494Smrg		*out_fence = execbuf.rsvd2 >> 32;
24602ee35494Smrg
246120131375Smrgskip_execution:
246222944501Smrg	if (bufmgr_gem->bufmgr.debug)
246322944501Smrg		drm_intel_gem_dump_validation_list(bufmgr_gem);
246422944501Smrg
246522944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
2466fe517fc9Smrg		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
246722944501Smrg
246820131375Smrg		bo_gem->idle = false;
246920131375Smrg
247022944501Smrg		/* Disconnect the buffer from the validate list */
247122944501Smrg		bo_gem->validate_index = -1;
247222944501Smrg		bufmgr_gem->exec_bos[i] = NULL;
247322944501Smrg	}
247422944501Smrg	bufmgr_gem->exec_count = 0;
247522944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
247622944501Smrg
247722944501Smrg	return ret;
247822944501Smrg}
247922944501Smrg
2480aaba2545Smrgstatic int
2481aaba2545Smrgdrm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2482aaba2545Smrg		       drm_clip_rect_t *cliprects, int num_cliprects,
2483aaba2545Smrg		       int DR4)
2484aaba2545Smrg{
248520131375Smrg	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
24862ee35494Smrg			-1, NULL, I915_EXEC_RENDER);
248720131375Smrg}
248820131375Smrg
248920131375Smrgstatic int
249020131375Smrgdrm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
249120131375Smrg			drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
249220131375Smrg			unsigned int flags)
249320131375Smrg{
249420131375Smrg	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
24952ee35494Smrg			-1, NULL, flags);
249620131375Smrg}
249720131375Smrg
24986260e5d5Smrgdrm_public int
249920131375Smrgdrm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
250020131375Smrg			      int used, unsigned int flags)
250120131375Smrg{
25022ee35494Smrg	return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
25032ee35494Smrg}
25042ee35494Smrg
25056260e5d5Smrgdrm_public int
25062ee35494Smrgdrm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
25072ee35494Smrg			    drm_intel_context *ctx,
25082ee35494Smrg			    int used,
25092ee35494Smrg			    int in_fence,
25102ee35494Smrg			    int *out_fence,
25112ee35494Smrg			    unsigned int flags)
25122ee35494Smrg{
25132ee35494Smrg	return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
2514aaba2545Smrg}
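
/*
 * A sketch of the fence-aware execbuf above ("in_fd" is assumed to be a
 * sync-file fd from another producer, or -1 for none): submit the batch
 * gated on in_fd and collect an out-fence for a hypothetical compositor.
 *
 *	int out_fd = -1;
 *	drm_intel_gem_bo_fence_exec(batch_bo, ctx, used, in_fd, &out_fd,
 *				    I915_EXEC_RENDER);
 *	if (out_fd >= 0)
 *		hand_to_compositor(out_fd);
 */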
2515aaba2545Smrg
251622944501Smrgstatic int
251722944501Smrgdrm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
251822944501Smrg{
251922944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
252022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
252122944501Smrg	struct drm_i915_gem_pin pin;
252222944501Smrg	int ret;
252322944501Smrg
2524424e9256Smrg	memclear(pin);
252522944501Smrg	pin.handle = bo_gem->gem_handle;
252622944501Smrg	pin.alignment = alignment;
252722944501Smrg
25286d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
25296d98c517Smrg		       DRM_IOCTL_I915_GEM_PIN,
25306d98c517Smrg		       &pin);
253122944501Smrg	if (ret != 0)
253222944501Smrg		return -errno;
253322944501Smrg
253420131375Smrg	bo->offset64 = pin.offset;
253522944501Smrg	bo->offset = pin.offset;
253622944501Smrg	return 0;
253722944501Smrg}
253822944501Smrg
253922944501Smrgstatic int
254022944501Smrgdrm_intel_gem_bo_unpin(drm_intel_bo *bo)
254122944501Smrg{
254222944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
254322944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
254422944501Smrg	struct drm_i915_gem_unpin unpin;
254522944501Smrg	int ret;
254622944501Smrg
2547424e9256Smrg	memclear(unpin);
254822944501Smrg	unpin.handle = bo_gem->gem_handle;
254922944501Smrg
25506d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
255122944501Smrg	if (ret != 0)
255222944501Smrg		return -errno;
255322944501Smrg
255422944501Smrg	return 0;
255522944501Smrg}
255622944501Smrg
255722944501Smrgstatic int
25586d98c517Smrgdrm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
25596d98c517Smrg				     uint32_t tiling_mode,
25606d98c517Smrg				     uint32_t stride)
256122944501Smrg{
256222944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
256322944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
256422944501Smrg	struct drm_i915_gem_set_tiling set_tiling;
256522944501Smrg	int ret;
256622944501Smrg
25676d98c517Smrg	if (bo_gem->global_name == 0 &&
25686d98c517Smrg	    tiling_mode == bo_gem->tiling_mode &&
25696d98c517Smrg	    stride == bo_gem->stride)
257022944501Smrg		return 0;
257122944501Smrg
257222944501Smrg	memclear(set_tiling);
257322944501Smrg	do {
25746d98c517Smrg		/* set_tiling is slightly broken and overwrites the
25756d98c517Smrg		 * input on the error path, so we have to open code
25766d98c517Smrg		 * drmIoctl.
25776d98c517Smrg		 */
25786d98c517Smrg		set_tiling.handle = bo_gem->gem_handle;
25796d98c517Smrg		set_tiling.tiling_mode = tiling_mode;
258022944501Smrg		set_tiling.stride = stride;
258122944501Smrg
258222944501Smrg		ret = ioctl(bufmgr_gem->fd,
258322944501Smrg			    DRM_IOCTL_I915_GEM_SET_TILING,
258422944501Smrg			    &set_tiling);
25856d98c517Smrg	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
25866d98c517Smrg	if (ret == -1)
25876d98c517Smrg		return -errno;
25886d98c517Smrg
25896d98c517Smrg	bo_gem->tiling_mode = set_tiling.tiling_mode;
25906d98c517Smrg	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
25916d98c517Smrg	bo_gem->stride = set_tiling.stride;
25926d98c517Smrg	return 0;
25936d98c517Smrg}
25946d98c517Smrg
25956d98c517Smrgstatic int
25966d98c517Smrgdrm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
25976d98c517Smrg			    uint32_t stride)
25986d98c517Smrg{
25996d98c517Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
26006d98c517Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
26016d98c517Smrg	int ret;
26026d98c517Smrg
2603a884aba1Smrg	/* Tiling with userptr surfaces is not supported
2604a884aba1Smrg	 * on all hardware, so refuse it for the time being.
2605a884aba1Smrg	 */
2606a884aba1Smrg	if (bo_gem->is_userptr)
2607a884aba1Smrg		return -EINVAL;
2608a884aba1Smrg
26096d98c517Smrg	/* Linear buffers have no stride. By ensuring that we only ever use
26106d98c517Smrg	 * stride 0 with linear buffers, we simplify our code.
26116d98c517Smrg	 */
26126d98c517Smrg	if (*tiling_mode == I915_TILING_NONE)
26136d98c517Smrg		stride = 0;
26146d98c517Smrg
26156d98c517Smrg	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
26166d98c517Smrg	if (ret == 0)
2617fe517fc9Smrg		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
261822944501Smrg
261922944501Smrg	*tiling_mode = bo_gem->tiling_mode;
2620aaba2545Smrg	return ret;
262122944501Smrg}
262222944501Smrg
262322944501Smrgstatic int
262422944501Smrgdrm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
262522944501Smrg			    uint32_t * swizzle_mode)
262622944501Smrg{
262722944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
262822944501Smrg
262922944501Smrg	*tiling_mode = bo_gem->tiling_mode;
263022944501Smrg	*swizzle_mode = bo_gem->swizzle_mode;
263122944501Smrg	return 0;
263222944501Smrg}
263322944501Smrg
2634fe517fc9Smrgstatic int
2635fe517fc9Smrgdrm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
2636fe517fc9Smrg{
2637fe517fc9Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2638fe517fc9Smrg
2639fe517fc9Smrg	bo->offset64 = offset;
2640fe517fc9Smrg	bo->offset = offset;
26410655efefSmrg	bo_gem->kflags |= EXEC_OBJECT_PINNED;
26420655efefSmrg
2643fe517fc9Smrg	return 0;
2644fe517fc9Smrg}
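
/*
 * A softpin sketch, assuming the public drm_intel_bo_set_softpin_offset()
 * wrapper dispatches here and that my_vma_alloc() is the caller's own
 * userspace VMA allocator: once pinned, the BO keeps this GPU address and
 * relocations against it are tracked as softpin targets instead.
 *
 *	drm_intel_bo_set_softpin_offset(bo, my_vma_alloc(bo->size));
 */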
2645fe517fc9Smrg
26466260e5d5Smrgdrm_public drm_intel_bo *
264720131375Smrgdrm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
264820131375Smrg{
264920131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
265020131375Smrg	int ret;
265120131375Smrg	uint32_t handle;
265220131375Smrg	drm_intel_bo_gem *bo_gem;
265320131375Smrg
2654fe517fc9Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
265520131375Smrg	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2656fe517fc9Smrg	if (ret) {
2657fe517fc9Smrg		DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
2658fe517fc9Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
2659fe517fc9Smrg		return NULL;
2660fe517fc9Smrg	}
266120131375Smrg
266220131375Smrg	/*
266320131375Smrg	 * See if the kernel has already returned this buffer to us. Just as
266420131375Smrg	 * for named buffers, we must not create two bos pointing at the same
266520131375Smrg	 * kernel object.
266620131375Smrg	 */
26672ee35494Smrg	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
26682ee35494Smrg		  &handle, sizeof(handle), bo_gem);
26692ee35494Smrg	if (bo_gem) {
26702ee35494Smrg		drm_intel_gem_bo_reference(&bo_gem->bo);
26712ee35494Smrg		goto out;
267220131375Smrg	}
267320131375Smrg
267420131375Smrg	bo_gem = calloc(1, sizeof(*bo_gem));
26752ee35494Smrg	if (!bo_gem)
26762ee35494Smrg		goto out;
26772ee35494Smrg
26782ee35494Smrg	atomic_set(&bo_gem->refcount, 1);
26792ee35494Smrg	DRMINITLISTHEAD(&bo_gem->vma_list);
26802ee35494Smrg
268120131375Smrg	/* Determine size of bo.  The fd-to-handle ioctl really should
268220131375Smrg	 * return the size, but it doesn't.  If we have kernel 3.12 or
268320131375Smrg	 * later, we can lseek on the prime fd to get the size.  Older
268420131375Smrg	 * kernels will just fail, in which case we fall back to the
268520131375Smrg	 * provided (estimated or guessed) size. */
268620131375Smrg	ret = lseek(prime_fd, 0, SEEK_END);
268720131375Smrg	if (ret != -1)
268820131375Smrg		bo_gem->bo.size = ret;
268920131375Smrg	else
269020131375Smrg		bo_gem->bo.size = size;
269120131375Smrg
269220131375Smrg	bo_gem->bo.handle = handle;
269320131375Smrg	bo_gem->bo.bufmgr = bufmgr;
269420131375Smrg
269520131375Smrg	bo_gem->gem_handle = handle;
26962ee35494Smrg	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
26972ee35494Smrg		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
269820131375Smrg
269920131375Smrg	bo_gem->name = "prime";
270020131375Smrg	bo_gem->validate_index = -1;
270120131375Smrg	bo_gem->reloc_tree_fences = 0;
270220131375Smrg	bo_gem->used_as_reloc_target = false;
270320131375Smrg	bo_gem->has_error = false;
270420131375Smrg	bo_gem->reusable = false;
270520131375Smrg
270687bf8e7cSmrg	ret = get_tiling_mode(bufmgr_gem, handle,
270787bf8e7cSmrg			      &bo_gem->tiling_mode, &bo_gem->swizzle_mode);
270887bf8e7cSmrg	if (ret)
27092ee35494Smrg		goto err;
27102ee35494Smrg
271120131375Smrg	/* XXX stride is unknown */
2712fe517fc9Smrg	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
271320131375Smrg
27142ee35494Smrgout:
27152ee35494Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
271620131375Smrg	return &bo_gem->bo;
27172ee35494Smrg
27182ee35494Smrgerr:
27192ee35494Smrg	drm_intel_gem_bo_free(&bo_gem->bo);
27202ee35494Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
27212ee35494Smrg	return NULL;
272220131375Smrg}
272320131375Smrg
27246260e5d5Smrgdrm_public int
272520131375Smrgdrm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
272620131375Smrg{
272720131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
272820131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
272920131375Smrg
273020131375Smrg	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
273187bf8e7cSmrg			       DRM_CLOEXEC | DRM_RDWR, prime_fd) != 0)
273220131375Smrg		return -errno;
273320131375Smrg
273420131375Smrg	bo_gem->reusable = false;
273520131375Smrg
273620131375Smrg	return 0;
273720131375Smrg}
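
/*
 * Usage sketch (illustrative, compiled out): a PRIME round trip with the two
 * functions above.  The 4096-byte size hint is an assumption; on kernel
 * 3.12+ the importer recovers the real size via lseek() itself.
 */
#if 0
static drm_intel_bo *
example_share_via_prime(drm_intel_bo *src, drm_intel_bufmgr *dst_bufmgr)
{
	drm_intel_bo *dst = NULL;
	int fd;

	if (drm_intel_bo_gem_export_to_prime(src, &fd) == 0) {
		dst = drm_intel_bo_gem_create_from_prime(dst_bufmgr, fd, 4096);
		close(fd);	/* the import holds its own reference */
	}

	return dst;
}
#endif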
273820131375Smrg
273922944501Smrgstatic int
274022944501Smrgdrm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
274122944501Smrg{
274222944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
274322944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
274422944501Smrg
274522944501Smrg	if (!bo_gem->global_name) {
274620131375Smrg		struct drm_gem_flink flink;
274720131375Smrg
2748424e9256Smrg		memclear(flink);
274922944501Smrg		flink.handle = bo_gem->gem_handle;
27502ee35494Smrg		if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
27512ee35494Smrg			return -errno;
275222944501Smrg
2753a884aba1Smrg		pthread_mutex_lock(&bufmgr_gem->lock);
27542ee35494Smrg		if (!bo_gem->global_name) {
27552ee35494Smrg			bo_gem->global_name = flink.name;
27562ee35494Smrg			bo_gem->reusable = false;
2757a884aba1Smrg
27582ee35494Smrg			HASH_ADD(name_hh, bufmgr_gem->name_table,
27592ee35494Smrg				 global_name, sizeof(bo_gem->global_name),
27602ee35494Smrg				 bo_gem);
2761a884aba1Smrg		}
2762a884aba1Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
276322944501Smrg	}
276422944501Smrg
276522944501Smrg	*name = bo_gem->global_name;
276622944501Smrg	return 0;
276722944501Smrg}
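
/*
 * Note: flink names live in a legacy, device-wide namespace -- any process
 * with access to the device node can guess a name and map the buffer.  New
 * code should prefer the PRIME fd-based sharing above.  Sketch (illustrative,
 * compiled out):
 */
#if 0
static uint32_t
example_get_share_name(drm_intel_bo *bo)
{
	uint32_t name = 0;

	if (drm_intel_bo_flink(bo, &name) != 0)
		return 0;	/* flink also marks the bo non-reusable */

	return name;	/* usable with drm_intel_bo_gem_create_from_name() */
}
#endif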
276822944501Smrg
276922944501Smrg/**
277022944501Smrg * Enables unlimited caching of buffer objects for reuse.
277122944501Smrg *
277222944501Smrg * This is potentially very memory expensive, as the cache at each bucket
277322944501Smrg * size is only bounded by how many buffers of that size we've managed to have
277422944501Smrg * in flight at once.
277522944501Smrg */
27766260e5d5Smrgdrm_public void
277722944501Smrgdrm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
277822944501Smrg{
277922944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
278022944501Smrg
278120131375Smrg	bufmgr_gem->bo_reuse = true;
278222944501Smrg}
278322944501Smrg
27842ee35494Smrg/**
27852ee35494Smrg * Disables implicit synchronisation before executing the bo
27862ee35494Smrg *
27872ee35494Smrg * This will cause rendering corruption unless you correctly manage explicit
27882ee35494Smrg * fences for all rendering involving this buffer - including use by others.
27892ee35494Smrg * Disabling the implicit serialisation is only required if that serialisation
27902ee35494Smrg * is too coarse (for example, you have split the buffer into many
27912ee35494Smrg * non-overlapping regions and are sharing the whole buffer between concurrent
27922ee35494Smrg * independent command streams).
27932ee35494Smrg *
27942ee35494Smrg * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
27952ee35494Smrg * which can be checked using drm_intel_bufmgr_can_disable_implicit_sync,
27962ee35494Smrg * or subsequent execbufs involving the bo will generate EINVAL.
27972ee35494Smrg */
27986260e5d5Smrgdrm_public void
27992ee35494Smrgdrm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
28002ee35494Smrg{
28012ee35494Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
28022ee35494Smrg
28032ee35494Smrg	bo_gem->kflags |= EXEC_OBJECT_ASYNC;
28042ee35494Smrg}
28052ee35494Smrg
28062ee35494Smrg/**
28072ee35494Smrg * Enables implicit synchronisation before executing the bo
28082ee35494Smrg *
28092ee35494Smrg * This is the default behaviour of the kernel, to wait upon prior writes
28102ee35494Smrg * completing on the object before rendering with it, or to wait for prior
28112ee35494Smrg * reads to complete before writing into the object.
28122ee35494Smrg * drm_intel_gem_bo_disable_implicit_sync() can stop this behaviour, telling
28132ee35494Smrg * the kernel never to insert a stall before using the object. Then this
28142ee35494Smrg * function can be used to restore the implicit sync before subsequent
28152ee35494Smrg * rendering.
28162ee35494Smrg */
28176260e5d5Smrgdrm_public void
28182ee35494Smrgdrm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
28192ee35494Smrg{
28202ee35494Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
28212ee35494Smrg
28222ee35494Smrg	bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
28232ee35494Smrg}
28242ee35494Smrg
28252ee35494Smrg/**
28262ee35494Smrg * Query whether the kernel supports disabling of its implicit synchronisation
28272ee35494Smrg * before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
28282ee35494Smrg */
28296260e5d5Smrgdrm_public int
28302ee35494Smrgdrm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
28312ee35494Smrg{
28322ee35494Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
28332ee35494Smrg
28342ee35494Smrg	return bufmgr_gem->has_exec_async;
28352ee35494Smrg}
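
/*
 * Usage sketch (illustrative, compiled out): the intended call order for the
 * implicit-sync entry points above -- query the bufmgr once, then opt
 * individual bos out of the kernel's implicit serialisation.
 */
#if 0
static void
example_opt_out_of_implicit_sync(drm_intel_bufmgr *bufmgr, drm_intel_bo *bo)
{
	if (!drm_intel_bufmgr_gem_can_disable_implicit_sync(bufmgr))
		return;	/* kernel lacks I915_PARAM_HAS_EXEC_ASYNC */

	/* From here on the caller must order access with explicit fences. */
	drm_intel_gem_bo_disable_implicit_sync(bo);
}
#endif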
28362ee35494Smrg
283722944501Smrg/**
283822944501Smrg * Enable use of fenced reloc type.
283922944501Smrg *
284022944501Smrg * New code should enable this to avoid unnecessary fence register
284122944501Smrg * allocation.  If this option is not enabled, all relocs will have a fence
284222944501Smrg * register allocated.
284322944501Smrg */
28446260e5d5Smrgdrm_public void
284522944501Smrgdrm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
284622944501Smrg{
284722944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
284822944501Smrg
284922944501Smrg	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
285020131375Smrg		bufmgr_gem->fenced_relocs = true;
285122944501Smrg}
285222944501Smrg
285322944501Smrg/**
285422944501Smrg * Return the additional aperture space required by the tree of buffer objects
285522944501Smrg * rooted at bo.
285622944501Smrg */
285722944501Smrgstatic int
285822944501Smrgdrm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
285922944501Smrg{
286022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
286122944501Smrg	int i;
286222944501Smrg	int total = 0;
286322944501Smrg
286422944501Smrg	if (bo == NULL || bo_gem->included_in_check_aperture)
286522944501Smrg		return 0;
286622944501Smrg
286722944501Smrg	total += bo->size;
286820131375Smrg	bo_gem->included_in_check_aperture = true;
286922944501Smrg
287022944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++)
287122944501Smrg		total +=
287222944501Smrg		    drm_intel_gem_bo_get_aperture_space(bo_gem->
287322944501Smrg							reloc_target_info[i].bo);
287422944501Smrg
287522944501Smrg	return total;
287622944501Smrg}
287722944501Smrg
287822944501Smrg/**
287922944501Smrg * Count the number of buffers in this list that need a fence reg
288022944501Smrg *
288122944501Smrg * If the count is greater than the number of available regs, we'll have
288222944501Smrg * to ask the caller to resubmit a batch with fewer tiled buffers.
288322944501Smrg *
288422944501Smrg * This function over-counts if the same buffer is used multiple times.
288522944501Smrg */
288622944501Smrgstatic unsigned int
288722944501Smrgdrm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
288822944501Smrg{
288922944501Smrg	int i;
289022944501Smrg	unsigned int total = 0;
289122944501Smrg
289222944501Smrg	for (i = 0; i < count; i++) {
289322944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
289422944501Smrg
289522944501Smrg		if (bo_gem == NULL)
289622944501Smrg			continue;
289722944501Smrg
289822944501Smrg		total += bo_gem->reloc_tree_fences;
289922944501Smrg	}
290022944501Smrg	return total;
290122944501Smrg}
290222944501Smrg
290322944501Smrg/**
290422944501Smrg * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
290522944501Smrg * for the next drm_intel_bufmgr_check_aperture_space() call.
290622944501Smrg */
290722944501Smrgstatic void
290822944501Smrgdrm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
290922944501Smrg{
291022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
291122944501Smrg	int i;
291222944501Smrg
291322944501Smrg	if (bo == NULL || !bo_gem->included_in_check_aperture)
291422944501Smrg		return;
291522944501Smrg
291620131375Smrg	bo_gem->included_in_check_aperture = false;
291722944501Smrg
291822944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++)
291922944501Smrg		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
292022944501Smrg							   reloc_target_info[i].bo);
292122944501Smrg}
292222944501Smrg
292322944501Smrg/**
292422944501Smrg * Return a conservative estimate for the amount of aperture required
292522944501Smrg * for a collection of buffers. This may double-count some buffers.
292622944501Smrg */
292722944501Smrgstatic unsigned int
292822944501Smrgdrm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
292922944501Smrg{
293022944501Smrg	int i;
293122944501Smrg	unsigned int total = 0;
293222944501Smrg
293322944501Smrg	for (i = 0; i < count; i++) {
293422944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
293522944501Smrg		if (bo_gem != NULL)
293622944501Smrg			total += bo_gem->reloc_tree_size;
293722944501Smrg	}
293822944501Smrg	return total;
293922944501Smrg}
294022944501Smrg
294122944501Smrg/**
294222944501Smrg * Return the amount of aperture needed for a collection of buffers.
294322944501Smrg * This avoids double counting any buffers, at the cost of looking
294422944501Smrg * at every buffer in the set.
294522944501Smrg */
294622944501Smrgstatic unsigned int
294722944501Smrgdrm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
294822944501Smrg{
294922944501Smrg	int i;
295022944501Smrg	unsigned int total = 0;
295122944501Smrg
295222944501Smrg	for (i = 0; i < count; i++) {
295322944501Smrg		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
295422944501Smrg		/* For the first buffer object in the array, we get an
295522944501Smrg		 * accurate count back for its reloc_tree size (since nothing
295622944501Smrg		 * had been flagged as being counted yet).  We can save that
295722944501Smrg		 * value out as a more conservative reloc_tree_size that
295822944501Smrg		 * avoids double-counting target buffers.  Since the first
295922944501Smrg		 * buffer happens to usually be the batch buffer in our
296022944501Smrg		 * callers, this can pull us back from doing the tree
296122944501Smrg		 * walk on every new batch emit.
296222944501Smrg		 */
296322944501Smrg		if (i == 0) {
296422944501Smrg			drm_intel_bo_gem *bo_gem =
296522944501Smrg			    (drm_intel_bo_gem *) bo_array[i];
296622944501Smrg			bo_gem->reloc_tree_size = total;
296722944501Smrg		}
296822944501Smrg	}
296922944501Smrg
297022944501Smrg	for (i = 0; i < count; i++)
297122944501Smrg		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
297222944501Smrg	return total;
297322944501Smrg}
297422944501Smrg
297522944501Smrg/**
297622944501Smrg * Return -1 if the batchbuffer should be flushed before attempting to
297722944501Smrg * emit rendering referencing the buffers pointed to by bo_array.
297822944501Smrg *
297922944501Smrg * This is required because if we try to emit a batchbuffer with relocations
298022944501Smrg * to a tree of buffers that won't simultaneously fit in the aperture,
298122944501Smrg * the rendering will return an error at a point where the software is not
298222944501Smrg * prepared to recover from it.
298322944501Smrg *
298422944501Smrg * However, we also want to emit the batchbuffer significantly before we reach
298522944501Smrg * the limit, as a series of batchbuffers each of which references buffers
298622944501Smrg * covering almost all of the aperture means that at each emit we end up
298722944501Smrg * waiting to evict a buffer from the last rendering, and we get synchronous
298822944501Smrg * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
298922944501Smrg * get better parallelism.
299022944501Smrg */
299122944501Smrgstatic int
299222944501Smrgdrm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
299322944501Smrg{
299422944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem =
299522944501Smrg	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
299622944501Smrg	unsigned int total = 0;
299722944501Smrg	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
299822944501Smrg	int total_fences;
299922944501Smrg
300022944501Smrg	/* Check for fence reg constraints if necessary */
300122944501Smrg	if (bufmgr_gem->available_fences) {
300222944501Smrg		total_fences = drm_intel_gem_total_fences(bo_array, count);
300322944501Smrg		if (total_fences > bufmgr_gem->available_fences)
300422944501Smrg			return -ENOSPC;
300522944501Smrg	}
300622944501Smrg
300722944501Smrg	total = drm_intel_gem_estimate_batch_space(bo_array, count);
300822944501Smrg
300922944501Smrg	if (total > threshold)
301022944501Smrg		total = drm_intel_gem_compute_batch_space(bo_array, count);
301122944501Smrg
301222944501Smrg	if (total > threshold) {
301322944501Smrg		DBG("check_space: overflowed available aperture, "
301422944501Smrg		    "%dkb vs %dkb\n",
301522944501Smrg		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
301622944501Smrg		return -ENOSPC;
301722944501Smrg	} else {
301822944501Smrg		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
301922944501Smrg		    (int)bufmgr_gem->gtt_size / 1024);
302022944501Smrg		return 0;
302122944501Smrg	}
302222944501Smrg}
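
/*
 * Usage sketch (illustrative, compiled out): how a batchbuffer writer is
 * expected to use the aperture check -- test the batch plus the new target
 * and flush first on -ENOSPC.  flush_batch() is a hypothetical caller-side
 * helper, not part of this file.
 */
#if 0
static void
example_emit_with_aperture_check(drm_intel_bo *batch_bo, drm_intel_bo *target)
{
	drm_intel_bo *check[2] = { batch_bo, target };

	if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0)
		flush_batch();	/* hypothetical: submit and start a new batch */

	/* ... now emit the relocations referencing target ... */
}
#endif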
302322944501Smrg
302422944501Smrg/*
302522944501Smrg * Disable buffer reuse for objects which are shared with the kernel
302622944501Smrg * as scanout buffers
302722944501Smrg */
302822944501Smrgstatic int
302922944501Smrgdrm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
303022944501Smrg{
303122944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
303222944501Smrg
303320131375Smrg	bo_gem->reusable = false;
303422944501Smrg	return 0;
303522944501Smrg}
303622944501Smrg
3037aaba2545Smrgstatic int
3038aaba2545Smrgdrm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
3039aaba2545Smrg{
3040aaba2545Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3041aaba2545Smrg
3042aaba2545Smrg	return bo_gem->reusable;
3043aaba2545Smrg}
3044aaba2545Smrg
304522944501Smrgstatic int
304622944501Smrg_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
304722944501Smrg{
304822944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
304922944501Smrg	int i;
305022944501Smrg
305122944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++) {
305222944501Smrg		if (bo_gem->reloc_target_info[i].bo == target_bo)
305322944501Smrg			return 1;
3054aaba2545Smrg		if (bo == bo_gem->reloc_target_info[i].bo)
3055aaba2545Smrg			continue;
305622944501Smrg		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
305722944501Smrg						target_bo))
305822944501Smrg			return 1;
305922944501Smrg	}
306022944501Smrg
3061fe517fc9Smrg	for (i = 0; i < bo_gem->softpin_target_count; i++) {
3062fe517fc9Smrg		if (bo_gem->softpin_target[i] == target_bo)
3063fe517fc9Smrg			return 1;
3064fe517fc9Smrg		if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
3065fe517fc9Smrg			return 1;
3066fe517fc9Smrg	}
3067fe517fc9Smrg
306822944501Smrg	return 0;
306922944501Smrg}
307022944501Smrg
307122944501Smrg/** Return true if target_bo is referenced by bo's relocation tree. */
307222944501Smrgstatic int
307322944501Smrgdrm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
307422944501Smrg{
307522944501Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
307622944501Smrg
307722944501Smrg	if (bo == NULL || target_bo == NULL)
307822944501Smrg		return 0;
307922944501Smrg	if (target_bo_gem->used_as_reloc_target)
308022944501Smrg		return _drm_intel_gem_bo_references(bo, target_bo);
308122944501Smrg	return 0;
308222944501Smrg}
308322944501Smrg
3084aaba2545Smrgstatic void
3085aaba2545Smrgadd_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
3086aaba2545Smrg{
3087aaba2545Smrg	unsigned int i = bufmgr_gem->num_buckets;
3088aaba2545Smrg
3089aaba2545Smrg	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
3090aaba2545Smrg
3091aaba2545Smrg	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
3092aaba2545Smrg	bufmgr_gem->cache_bucket[i].size = size;
3093aaba2545Smrg	bufmgr_gem->num_buckets++;
3094aaba2545Smrg}
3095aaba2545Smrg
3096aaba2545Smrgstatic void
3097aaba2545Smrginit_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
3098aaba2545Smrg{
3099aaba2545Smrg	unsigned long size, cache_max_size = 64 * 1024 * 1024;
3100aaba2545Smrg
3101aaba2545Smrg	/* OK, so power-of-two buckets were too wasteful of memory.
3102aaba2545Smrg	 * Give 3 other sizes between each power of two, to hopefully
3103aaba2545Smrg	 * cover things accurately enough.  (The alternative is
3104aaba2545Smrg	 * probably to just go for exact matching of sizes, and assume
3105aaba2545Smrg	 * that for things like composited window resize the tiled
3106aaba2545Smrg	 * width/height alignment and rounding of sizes to pages will
3107aaba2545Smrg	 * get us useful cache hit rates anyway.)
3108aaba2545Smrg	 */
3109aaba2545Smrg	add_bucket(bufmgr_gem, 4096);
3110aaba2545Smrg	add_bucket(bufmgr_gem, 4096 * 2);
3111aaba2545Smrg	add_bucket(bufmgr_gem, 4096 * 3);
3112aaba2545Smrg
3113aaba2545Smrg	/* Initialize the linked lists for BO reuse cache. */
3114aaba2545Smrg	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
3115aaba2545Smrg		add_bucket(bufmgr_gem, size);
3116aaba2545Smrg
3117aaba2545Smrg		add_bucket(bufmgr_gem, size + size * 1 / 4);
3118aaba2545Smrg		add_bucket(bufmgr_gem, size + size * 2 / 4);
3119aaba2545Smrg		add_bucket(bufmgr_gem, size + size * 3 / 4);
3120aaba2545Smrg	}
3121aaba2545Smrg}
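
/*
 * For reference, the buckets created above are 4KiB, 8KiB and 12KiB, then
 * for each power of two from 16KiB up to the 64MiB cap the power itself
 * plus 1.25x, 1.5x and 1.75x of it: 16, 20, 24, 28, 32, 40, 48, 56, 64KiB
 * and so on -- four buckets per power-of-two interval.
 */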
3122aaba2545Smrg
31236260e5d5Smrgdrm_public void
312420131375Smrgdrm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
312520131375Smrg{
312620131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
312720131375Smrg
312820131375Smrg	bufmgr_gem->vma_max = limit;
312920131375Smrg
313020131375Smrg	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
313120131375Smrg}
313220131375Smrg
31332ee35494Smrgstatic int
31342ee35494Smrgparse_devid_override(const char *devid_override)
31352ee35494Smrg{
31362ee35494Smrg	static const struct {
31372ee35494Smrg		const char *name;
31382ee35494Smrg		int pci_id;
31392ee35494Smrg	} name_map[] = {
31402ee35494Smrg		{ "brw", PCI_CHIP_I965_GM },
31412ee35494Smrg		{ "g4x", PCI_CHIP_GM45_GM },
31422ee35494Smrg		{ "ilk", PCI_CHIP_ILD_G },
31432ee35494Smrg		{ "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
31442ee35494Smrg		{ "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
31452ee35494Smrg		{ "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
31462ee35494Smrg		{ "byt", PCI_CHIP_VALLEYVIEW_3 },
31472ee35494Smrg		{ "bdw", 0x1620 | BDW_ULX },
31482ee35494Smrg		{ "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
31492ee35494Smrg		{ "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
31502ee35494Smrg	};
31512ee35494Smrg	unsigned int i;
31522ee35494Smrg
31532ee35494Smrg	for (i = 0; i < ARRAY_SIZE(name_map); i++) {
31542ee35494Smrg		if (!strcmp(name_map[i].name, devid_override))
31552ee35494Smrg			return name_map[i].pci_id;
31562ee35494Smrg	}
31572ee35494Smrg
31582ee35494Smrg	return strtod(devid_override, NULL);
31592ee35494Smrg}
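
/*
 * Illustrative use of the override (values are example assumptions).  The
 * strtod() fallback above accepts decimal as well as 0x-prefixed hex via
 * C99 hexadecimal parsing:
 *
 *	$ INTEL_DEVID_OVERRIDE=skl tool        # by codename
 *	$ INTEL_DEVID_OVERRIDE=0x1912 tool     # by raw PCI ID
 *
 * Note that using the override also sets no_exec below, so batches are
 * never actually submitted -- it is meant for inspection tools only.
 */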
31602ee35494Smrg
316120131375Smrg/**
316220131375Smrg * Get the PCI ID for the device.  This can be overridden by setting the
316320131375Smrg * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
316420131375Smrg */
316520131375Smrgstatic int
316620131375Smrgget_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
316720131375Smrg{
316820131375Smrg	char *devid_override;
3169424e9256Smrg	int devid = 0;
317020131375Smrg	int ret;
317120131375Smrg	drm_i915_getparam_t gp;
317220131375Smrg
317320131375Smrg	if (geteuid() == getuid()) {
317420131375Smrg		devid_override = getenv("INTEL_DEVID_OVERRIDE");
317520131375Smrg		if (devid_override) {
317620131375Smrg			bufmgr_gem->no_exec = true;
31772ee35494Smrg			return parse_devid_override(devid_override);
317820131375Smrg		}
317920131375Smrg	}
318020131375Smrg
3181424e9256Smrg	memclear(gp);
318220131375Smrg	gp.param = I915_PARAM_CHIPSET_ID;
318320131375Smrg	gp.value = &devid;
318420131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
318520131375Smrg	if (ret) {
318620131375Smrg		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
318720131375Smrg		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
318820131375Smrg	}
318920131375Smrg	return devid;
319020131375Smrg}
319120131375Smrg
31926260e5d5Smrgdrm_public int
319320131375Smrgdrm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
319420131375Smrg{
319520131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
319620131375Smrg
319720131375Smrg	return bufmgr_gem->pci_device;
319820131375Smrg}
319920131375Smrg
320020131375Smrg/**
320120131375Smrg * Sets the AUB filename.
320220131375Smrg *
320320131375Smrg * Deprecated: AUB dumping was removed from libdrm, so this function is
320420131375Smrg * now a no-op kept for ABI compatibility.
320520131375Smrg */
32066260e5d5Smrgdrm_public void
320720131375Smrgdrm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
320820131375Smrg				      const char *filename)
320920131375Smrg{
321020131375Smrg}
321120131375Smrg
321220131375Smrg/**
321320131375Smrg * Sets up AUB dumping.
321420131375Smrg *
321520131375Smrg * AUB is a trace file format that can be used with the GPU simulator:
321620131375Smrg * packets are emitted in a format somewhat like GPU command packets.
321720131375Smrg * Deprecated: libdrm no longer implements dumping; calling this only
321820131375Smrg * prints a pointer to intel_aubdump from intel-gpu-tools.
321920131375Smrg */
32206260e5d5Smrgdrm_public void
322120131375Smrgdrm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
322220131375Smrg{
3223fe517fc9Smrg	fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
3224fe517fc9Smrg		"Use intel_aubdump from intel-gpu-tools instead.  Install intel-gpu-tools,\n"
3225fe517fc9Smrg		"then run (for example)\n\n"
3226fe517fc9Smrg		"\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
3227fe517fc9Smrg		"See the intel_aubdump man page for more details.\n");
322820131375Smrg}
322920131375Smrg
32306260e5d5Smrgdrm_public drm_intel_context *
323120131375Smrgdrm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
323220131375Smrg{
323320131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
323420131375Smrg	struct drm_i915_gem_context_create create;
323520131375Smrg	drm_intel_context *context = NULL;
323620131375Smrg	int ret;
323720131375Smrg
323820131375Smrg	context = calloc(1, sizeof(*context));
323920131375Smrg	if (!context)
324020131375Smrg		return NULL;
324120131375Smrg
3242424e9256Smrg	memclear(create);
324320131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
324420131375Smrg	if (ret != 0) {
324520131375Smrg		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
324620131375Smrg		    strerror(errno));
324720131375Smrg		free(context);
324820131375Smrg		return NULL;
324920131375Smrg	}
325020131375Smrg
325120131375Smrg	context->ctx_id = create.ctx_id;
325220131375Smrg	context->bufmgr = bufmgr;
325320131375Smrg
325420131375Smrg	return context;
325520131375Smrg}
325620131375Smrg
32576260e5d5Smrgdrm_public int
32582ee35494Smrgdrm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
32592ee35494Smrg{
32602ee35494Smrg	if (ctx == NULL)
32612ee35494Smrg		return -EINVAL;
32622ee35494Smrg
32632ee35494Smrg	*ctx_id = ctx->ctx_id;
32642ee35494Smrg
32652ee35494Smrg	return 0;
32662ee35494Smrg}
32672ee35494Smrg
32686260e5d5Smrgdrm_public void
326920131375Smrgdrm_intel_gem_context_destroy(drm_intel_context *ctx)
327020131375Smrg{
327120131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
327220131375Smrg	struct drm_i915_gem_context_destroy destroy;
327320131375Smrg	int ret;
327420131375Smrg
327520131375Smrg	if (ctx == NULL)
327620131375Smrg		return;
327720131375Smrg
3278424e9256Smrg	memclear(destroy);
327920131375Smrg
328020131375Smrg	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
328120131375Smrg	destroy.ctx_id = ctx->ctx_id;
328220131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
328320131375Smrg		       &destroy);
328420131375Smrg	if (ret != 0)
328520131375Smrg		fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
328620131375Smrg			strerror(errno));
328720131375Smrg
328820131375Smrg	free(ctx);
328920131375Smrg}
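
/*
 * Usage sketch (illustrative, compiled out): the typical hardware-context
 * lifecycle built from the functions above.  "used" is the number of bytes
 * of the batch to execute, as with drm_intel_gem_bo_context_exec().
 */
#if 0
static void
example_context_lifecycle(drm_intel_bufmgr *bufmgr, drm_intel_bo *batch_bo,
			  int used)
{
	drm_intel_context *ctx;
	uint32_t ctx_id;

	ctx = drm_intel_gem_context_create(bufmgr);
	if (ctx == NULL)
		return;	/* kernel too old to support contexts */

	if (drm_intel_gem_context_get_id(ctx, &ctx_id) == 0)
		drm_intel_gem_bo_context_exec(batch_bo, ctx, used, 0);

	drm_intel_gem_context_destroy(ctx);
}
#endif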
329020131375Smrg
32916260e5d5Smrgdrm_public int
329220131375Smrgdrm_intel_get_reset_stats(drm_intel_context *ctx,
329320131375Smrg			  uint32_t *reset_count,
329420131375Smrg			  uint32_t *active,
329520131375Smrg			  uint32_t *pending)
329620131375Smrg{
329720131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
329820131375Smrg	struct drm_i915_reset_stats stats;
329920131375Smrg	int ret;
330020131375Smrg
330120131375Smrg	if (ctx == NULL)
330220131375Smrg		return -EINVAL;
330320131375Smrg
3304424e9256Smrg	memclear(stats);
330520131375Smrg
330620131375Smrg	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
330720131375Smrg	stats.ctx_id = ctx->ctx_id;
330820131375Smrg	ret = drmIoctl(bufmgr_gem->fd,
330920131375Smrg		       DRM_IOCTL_I915_GET_RESET_STATS,
331020131375Smrg		       &stats);
331120131375Smrg	if (ret == 0) {
331220131375Smrg		if (reset_count != NULL)
331320131375Smrg			*reset_count = stats.reset_count;
331420131375Smrg
331520131375Smrg		if (active != NULL)
331620131375Smrg			*active = stats.batch_active;
331720131375Smrg
331820131375Smrg		if (pending != NULL)
331920131375Smrg			*pending = stats.batch_pending;
332020131375Smrg	}
332120131375Smrg
332220131375Smrg	return ret;
332320131375Smrg}
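
/*
 * Usage sketch (illustrative, compiled out): a GL_ARB_robustness-style
 * check built on the reset statistics -- compare the context's reset count
 * against a baseline taken when the context was created.
 */
#if 0
static bool
example_context_was_reset(drm_intel_context *ctx, uint32_t baseline)
{
	uint32_t reset_count;

	if (drm_intel_get_reset_stats(ctx, &reset_count, NULL, NULL) != 0)
		return false;	/* treat a failed query as "no reset seen" */

	return reset_count != baseline;
}
#endif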
332420131375Smrg
33256260e5d5Smrgdrm_public int
332620131375Smrgdrm_intel_reg_read(drm_intel_bufmgr *bufmgr,
332720131375Smrg		   uint32_t offset,
332820131375Smrg		   uint64_t *result)
332920131375Smrg{
333020131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
333120131375Smrg	struct drm_i915_reg_read reg_read;
333220131375Smrg	int ret;
333320131375Smrg
3334424e9256Smrg	memclear(reg_read);
333520131375Smrg	reg_read.offset = offset;
333620131375Smrg
333720131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
333820131375Smrg
333920131375Smrg	*result = reg_read.val;
334020131375Smrg	return ret;
334120131375Smrg}
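
/*
 * Usage sketch (illustrative, compiled out): the register most often read
 * through this interface is the render ring timestamp; 0x2358 is the RCS
 * TIMESTAMP offset assumed here.  The kernel whitelists which registers
 * may be read, so expect an error for anything else.
 */
#if 0
static uint64_t
example_read_gpu_timestamp(drm_intel_bufmgr *bufmgr)
{
	uint64_t timestamp = 0;

	(void)drm_intel_reg_read(bufmgr, 0x2358 /* RCS TIMESTAMP */,
				 &timestamp);
	return timestamp;
}
#endif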
334220131375Smrg
33436260e5d5Smrgdrm_public int
3344424e9256Smrgdrm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
3345424e9256Smrg{
3346424e9256Smrg	drm_i915_getparam_t gp;
3347424e9256Smrg	int ret;
3348424e9256Smrg
3349424e9256Smrg	memclear(gp);
3350424e9256Smrg	gp.value = (int*)subslice_total;
3351424e9256Smrg	gp.param = I915_PARAM_SUBSLICE_TOTAL;
3352424e9256Smrg	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3353424e9256Smrg	if (ret)
3354424e9256Smrg		return -errno;
3355424e9256Smrg
3356424e9256Smrg	return 0;
3357424e9256Smrg}
3358424e9256Smrg
33596260e5d5Smrgdrm_public int
3360424e9256Smrgdrm_intel_get_eu_total(int fd, unsigned int *eu_total)
3361424e9256Smrg{
3362424e9256Smrg	drm_i915_getparam_t gp;
3363424e9256Smrg	int ret;
3364424e9256Smrg
3365424e9256Smrg	memclear(gp);
3366424e9256Smrg	gp.value = (int*)eu_total;
3367424e9256Smrg	gp.param = I915_PARAM_EU_TOTAL;
3368424e9256Smrg	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3369424e9256Smrg	if (ret)
3370424e9256Smrg		return -errno;
3371424e9256Smrg
3372424e9256Smrg	return 0;
3373424e9256Smrg}
337420131375Smrg
33756260e5d5Smrgdrm_public int
33762ee35494Smrgdrm_intel_get_pooled_eu(int fd)
33772ee35494Smrg{
33782ee35494Smrg	drm_i915_getparam_t gp;
33792ee35494Smrg	int ret = -1;
33802ee35494Smrg
33812ee35494Smrg	memclear(gp);
33822ee35494Smrg	gp.param = I915_PARAM_HAS_POOLED_EU;
33832ee35494Smrg	gp.value = &ret;
33842ee35494Smrg	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
33852ee35494Smrg		return -errno;
33862ee35494Smrg
33872ee35494Smrg	return ret;
33882ee35494Smrg}
33892ee35494Smrg
33906260e5d5Smrgdrm_public int
33912ee35494Smrgdrm_intel_get_min_eu_in_pool(int fd)
33922ee35494Smrg{
33932ee35494Smrg	drm_i915_getparam_t gp;
33942ee35494Smrg	int ret = -1;
33952ee35494Smrg
33962ee35494Smrg	memclear(gp);
33972ee35494Smrg	gp.param = I915_PARAM_MIN_EU_IN_POOL;
33982ee35494Smrg	gp.value = &ret;
33992ee35494Smrg	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
34002ee35494Smrg		return -errno;
34012ee35494Smrg
34022ee35494Smrg	return ret;
34032ee35494Smrg}
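
/*
 * Usage sketch (illustrative, compiled out): the topology queries above use
 * two conventions -- the subslice/EU helpers fill a pointer and return 0 or
 * -errno, while the pooled-EU helpers return the value itself (or -errno).
 */
#if 0
static void
example_query_topology(int fd)
{
	unsigned int subslices = 0, eus = 0;

	if (drm_intel_get_subslice_total(fd, &subslices) == 0 &&
	    drm_intel_get_eu_total(fd, &eus) == 0) {
		/* subslices and eus now hold the totals */
	}

	if (drm_intel_get_pooled_eu(fd) > 0) {
		int min_eu = drm_intel_get_min_eu_in_pool(fd);
		(void)min_eu;	/* minimum EUs per pool, or -errno */
	}
}
#endif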
34042ee35494Smrg
340520131375Smrg/**
340620131375Smrg * Annotate the given bo for use in aub dumping.
340720131375Smrg *
340820131375Smrg * \param annotations is an array of drm_intel_aub_annotation objects
340920131375Smrg * describing the type of data in various sections of the bo.  Each
341020131375Smrg * element of the array specifies the type and subtype of a section of
341120131375Smrg * the bo, and the past-the-end offset of that section.  The elements
341220131375Smrg * of \c annotations must be sorted so that ending_offset is
341320131375Smrg * increasing.
341420131375Smrg *
341520131375Smrg * \param count is the number of elements in the \c annotations array.
341620131375Smrg * If \c count is zero, then \c annotations will not be dereferenced.
341720131375Smrg *
341820131375Smrg * Annotations are copied into a private data structure, so caller may
341920131375Smrg * re-use the memory pointed to by \c annotations after the call
342020131375Smrg * returns.
342120131375Smrg *
342220131375Smrg * Annotations are stored for the lifetime of the bo; to reset to the
342320131375Smrg * default state (no annotations), call this function with a \c count
342420131375Smrg * of zero.  Deprecated: with AUB dumping removed from libdrm, this
342520131375Smrg * function is now a no-op kept for ABI compatibility. */
34266260e5d5Smrgdrm_public void drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
342720131375Smrg					 drm_intel_aub_annotation *annotations,
342820131375Smrg					 unsigned count)
342920131375Smrg{
343020131375Smrg}
343120131375Smrg
3432a884aba1Smrgstatic pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3433a884aba1Smrgstatic drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3434a884aba1Smrg
3435a884aba1Smrgstatic drm_intel_bufmgr_gem *
3436a884aba1Smrgdrm_intel_bufmgr_gem_find(int fd)
3437a884aba1Smrg{
3438a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
3439a884aba1Smrg
3440a884aba1Smrg	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3441a884aba1Smrg		if (bufmgr_gem->fd == fd) {
3442a884aba1Smrg			atomic_inc(&bufmgr_gem->refcount);
3443a884aba1Smrg			return bufmgr_gem;
3444a884aba1Smrg		}
3445a884aba1Smrg	}
3446a884aba1Smrg
3447a884aba1Smrg	return NULL;
3448a884aba1Smrg}
3449a884aba1Smrg
3450a884aba1Smrgstatic void
3451a884aba1Smrgdrm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3452a884aba1Smrg{
3453a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3454a884aba1Smrg
3455a884aba1Smrg	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3456a884aba1Smrg		pthread_mutex_lock(&bufmgr_list_mutex);
3457a884aba1Smrg
3458a884aba1Smrg		if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3459a884aba1Smrg			DRMLISTDEL(&bufmgr_gem->managers);
3460a884aba1Smrg			drm_intel_bufmgr_gem_destroy(bufmgr);
3461a884aba1Smrg		}
3462a884aba1Smrg
3463a884aba1Smrg		pthread_mutex_unlock(&bufmgr_list_mutex);
3464a884aba1Smrg	}
3465a884aba1Smrg}
3466a884aba1Smrg
34676260e5d5Smrgdrm_public void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
34682ee35494Smrg{
34692ee35494Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
34702ee35494Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
34712ee35494Smrg
34722ee35494Smrg	if (bo_gem->gtt_virtual)
34732ee35494Smrg		return bo_gem->gtt_virtual;
34742ee35494Smrg
34752ee35494Smrg	if (bo_gem->is_userptr)
34762ee35494Smrg		return NULL;
34772ee35494Smrg
34782ee35494Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
34792ee35494Smrg	if (bo_gem->gtt_virtual == NULL) {
34802ee35494Smrg		struct drm_i915_gem_mmap_gtt mmap_arg;
34812ee35494Smrg		void *ptr;
34822ee35494Smrg
34832ee35494Smrg		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
34842ee35494Smrg		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
34852ee35494Smrg
34862ee35494Smrg		if (bo_gem->map_count++ == 0)
34872ee35494Smrg			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
34882ee35494Smrg
34892ee35494Smrg		memclear(mmap_arg);
34902ee35494Smrg		mmap_arg.handle = bo_gem->gem_handle;
34912ee35494Smrg
34922ee35494Smrg		/* Get the fake offset back... */
34932ee35494Smrg		ptr = MAP_FAILED;
34942ee35494Smrg		if (drmIoctl(bufmgr_gem->fd,
34952ee35494Smrg			     DRM_IOCTL_I915_GEM_MMAP_GTT,
34962ee35494Smrg			     &mmap_arg) == 0) {
34972ee35494Smrg			/* and mmap it */
34982ee35494Smrg			ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
34992ee35494Smrg				       MAP_SHARED, bufmgr_gem->fd,
35002ee35494Smrg				       mmap_arg.offset);
35012ee35494Smrg		}
35022ee35494Smrg		if (ptr == MAP_FAILED) {
35032ee35494Smrg			if (--bo_gem->map_count == 0)
35042ee35494Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
35052ee35494Smrg			ptr = NULL;
35062ee35494Smrg		}
35072ee35494Smrg
35082ee35494Smrg		bo_gem->gtt_virtual = ptr;
35092ee35494Smrg	}
35102ee35494Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
35112ee35494Smrg
35122ee35494Smrg	return bo_gem->gtt_virtual;
35132ee35494Smrg}
35142ee35494Smrg
35156260e5d5Smrgdrm_public void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
35162ee35494Smrg{
35172ee35494Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
35182ee35494Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
35192ee35494Smrg
35202ee35494Smrg	if (bo_gem->mem_virtual)
35212ee35494Smrg		return bo_gem->mem_virtual;
35222ee35494Smrg
35232ee35494Smrg	if (bo_gem->is_userptr) {
35242ee35494Smrg		/* Return the same user ptr */
35252ee35494Smrg		return bo_gem->user_virtual;
35262ee35494Smrg	}
35272ee35494Smrg
35282ee35494Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
35292ee35494Smrg	if (!bo_gem->mem_virtual) {
35302ee35494Smrg		struct drm_i915_gem_mmap mmap_arg;
35312ee35494Smrg
35322ee35494Smrg		if (bo_gem->map_count++ == 0)
35332ee35494Smrg			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
35342ee35494Smrg
35352ee35494Smrg		DBG("bo_map: %d (%s), map_count=%d\n",
35362ee35494Smrg		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
35372ee35494Smrg
35382ee35494Smrg		memclear(mmap_arg);
35392ee35494Smrg		mmap_arg.handle = bo_gem->gem_handle;
35402ee35494Smrg		mmap_arg.size = bo->size;
35412ee35494Smrg		if (drmIoctl(bufmgr_gem->fd,
35422ee35494Smrg			     DRM_IOCTL_I915_GEM_MMAP,
35432ee35494Smrg			     &mmap_arg)) {
35442ee35494Smrg			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
35452ee35494Smrg			    __FILE__, __LINE__, bo_gem->gem_handle,
35462ee35494Smrg			    bo_gem->name, strerror(errno));
35472ee35494Smrg			if (--bo_gem->map_count == 0)
35482ee35494Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
35492ee35494Smrg		} else {
35502ee35494Smrg			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
35512ee35494Smrg			bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
35522ee35494Smrg		}
35532ee35494Smrg	}
35542ee35494Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
35552ee35494Smrg
35562ee35494Smrg	return bo_gem->mem_virtual;
35572ee35494Smrg}
35582ee35494Smrg
35596260e5d5Smrgdrm_public void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
35602ee35494Smrg{
35612ee35494Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
35622ee35494Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
35632ee35494Smrg
35642ee35494Smrg	if (bo_gem->wc_virtual)
35652ee35494Smrg		return bo_gem->wc_virtual;
35662ee35494Smrg
35672ee35494Smrg	if (bo_gem->is_userptr)
35682ee35494Smrg		return NULL;
35692ee35494Smrg
35702ee35494Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
35712ee35494Smrg	if (!bo_gem->wc_virtual) {
35722ee35494Smrg		struct drm_i915_gem_mmap mmap_arg;
35732ee35494Smrg
35742ee35494Smrg		if (bo_gem->map_count++ == 0)
35752ee35494Smrg			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
35762ee35494Smrg
35772ee35494Smrg		DBG("bo_map: %d (%s), map_count=%d\n",
35782ee35494Smrg		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
35792ee35494Smrg
35802ee35494Smrg		memclear(mmap_arg);
35812ee35494Smrg		mmap_arg.handle = bo_gem->gem_handle;
35822ee35494Smrg		mmap_arg.size = bo->size;
35832ee35494Smrg		mmap_arg.flags = I915_MMAP_WC;
35842ee35494Smrg		if (drmIoctl(bufmgr_gem->fd,
35852ee35494Smrg			     DRM_IOCTL_I915_GEM_MMAP,
35862ee35494Smrg			     &mmap_arg)) {
35872ee35494Smrg			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
35882ee35494Smrg			    __FILE__, __LINE__, bo_gem->gem_handle,
35892ee35494Smrg			    bo_gem->name, strerror(errno));
35902ee35494Smrg			if (--bo_gem->map_count == 0)
35912ee35494Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
35922ee35494Smrg		} else {
35932ee35494Smrg			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
35942ee35494Smrg			bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
35952ee35494Smrg		}
35962ee35494Smrg	}
35972ee35494Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
35982ee35494Smrg
35992ee35494Smrg	return bo_gem->wc_virtual;
36002ee35494Smrg}
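
/*
 * The three map helpers above trade coherency for bandwidth differently:
 * map__cpu returns a cacheable CPU mapping (fast to read, but needs
 * flushing on non-LLC parts), map__gtt maps through the aperture and is
 * coherent with the GPU's fenced view (write-combined, slow to read back),
 * and map__wc is a direct write-combined CPU mapping, the usual choice for
 * streaming uploads on non-LLC hardware.  All three cache the mapping on
 * the bo and return it unchanged on subsequent calls, with no matching
 * unmap required from the caller.
 */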
36012ee35494Smrg
360222944501Smrg/**
360322944501Smrg * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
360422944501Smrg * and manage buffer objects.
360522944501Smrg *
360622944501Smrg * \param fd File descriptor of the opened DRM device.
360722944501Smrg */
36086260e5d5Smrgdrm_public drm_intel_bufmgr *
360922944501Smrgdrm_intel_bufmgr_gem_init(int fd, int batch_size)
361022944501Smrg{
361122944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
361222944501Smrg	struct drm_i915_gem_get_aperture aperture;
361322944501Smrg	drm_i915_getparam_t gp;
361420131375Smrg	int ret, tmp;
361520131375Smrg	bool exec2 = false;
361622944501Smrg
3617a884aba1Smrg	pthread_mutex_lock(&bufmgr_list_mutex);
3618a884aba1Smrg
3619a884aba1Smrg	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3620a884aba1Smrg	if (bufmgr_gem)
3621a884aba1Smrg		goto exit;
3622a884aba1Smrg
362322944501Smrg	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
362422944501Smrg	if (bufmgr_gem == NULL)
3625a884aba1Smrg		goto exit;
362622944501Smrg
362722944501Smrg	bufmgr_gem->fd = fd;
3628a884aba1Smrg	atomic_set(&bufmgr_gem->refcount, 1);
362922944501Smrg
363022944501Smrg	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
363122944501Smrg		free(bufmgr_gem);
3632a884aba1Smrg		bufmgr_gem = NULL;
3633a884aba1Smrg		goto exit;
363422944501Smrg	}
363522944501Smrg
3636424e9256Smrg	memclear(aperture);
36376d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
36386d98c517Smrg		       DRM_IOCTL_I915_GEM_GET_APERTURE,
36396d98c517Smrg		       &aperture);
364022944501Smrg
364122944501Smrg	if (ret == 0)
364222944501Smrg		bufmgr_gem->gtt_size = aperture.aper_available_size;
364322944501Smrg	else {
364422944501Smrg		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
364522944501Smrg			strerror(errno));
364622944501Smrg		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
364722944501Smrg		fprintf(stderr, "Assuming %dkB available aperture size.\n"
364822944501Smrg			"May lead to reduced performance or incorrect "
364922944501Smrg			"rendering.\n",
365022944501Smrg			(int)bufmgr_gem->gtt_size / 1024);
365122944501Smrg	}
365222944501Smrg
365320131375Smrg	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
365422944501Smrg
365520131375Smrg	if (IS_GEN2(bufmgr_gem->pci_device))
365622944501Smrg		bufmgr_gem->gen = 2;
365720131375Smrg	else if (IS_GEN3(bufmgr_gem->pci_device))
365822944501Smrg		bufmgr_gem->gen = 3;
365920131375Smrg	else if (IS_GEN4(bufmgr_gem->pci_device))
366022944501Smrg		bufmgr_gem->gen = 4;
366120131375Smrg	else if (IS_GEN5(bufmgr_gem->pci_device))
366220131375Smrg		bufmgr_gem->gen = 5;
366320131375Smrg	else if (IS_GEN6(bufmgr_gem->pci_device))
366422944501Smrg		bufmgr_gem->gen = 6;
366520131375Smrg	else if (IS_GEN7(bufmgr_gem->pci_device))
366620131375Smrg		bufmgr_gem->gen = 7;
366720131375Smrg	else if (IS_GEN8(bufmgr_gem->pci_device))
366820131375Smrg		bufmgr_gem->gen = 8;
36696260e5d5Smrg	else if (!intel_get_genx(bufmgr_gem->pci_device, &bufmgr_gem->gen)) {
367020131375Smrg		free(bufmgr_gem);
3671a884aba1Smrg		bufmgr_gem = NULL;
3672a884aba1Smrg		goto exit;
367320131375Smrg	}
367420131375Smrg
367520131375Smrg	if (IS_GEN3(bufmgr_gem->pci_device) &&
367620131375Smrg	    bufmgr_gem->gtt_size > 256*1024*1024) {
367720131375Smrg		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
367820131375Smrg		 * be used for tiled blits. To simplify the accounting, just
3679fe517fc9Smrg		 * subtract the unmappable part (fixed to 256MB on all known
368020131375Smrg		 * gen3 devices) if the kernel advertises it. */
368120131375Smrg		bufmgr_gem->gtt_size -= 256*1024*1024;
368220131375Smrg	}
368320131375Smrg
3684424e9256Smrg	memclear(gp);
368520131375Smrg	gp.value = &tmp;
368622944501Smrg
368722944501Smrg	gp.param = I915_PARAM_HAS_EXECBUF2;
36886d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
368922944501Smrg	if (!ret)
369020131375Smrg		exec2 = true;
369122944501Smrg
3692aaba2545Smrg	gp.param = I915_PARAM_HAS_BSD;
36936d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
36949ce4edccSmrg	bufmgr_gem->has_bsd = ret == 0;
36959ce4edccSmrg
36969ce4edccSmrg	gp.param = I915_PARAM_HAS_BLT;
36979ce4edccSmrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
36989ce4edccSmrg	bufmgr_gem->has_blt = ret == 0;
36999ce4edccSmrg
37009ce4edccSmrg	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
37019ce4edccSmrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
37029ce4edccSmrg	bufmgr_gem->has_relaxed_fencing = ret == 0;
3703aaba2545Smrg
37042ee35494Smrg	gp.param = I915_PARAM_HAS_EXEC_ASYNC;
37052ee35494Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
37062ee35494Smrg	bufmgr_gem->has_exec_async = ret == 0;
37072ee35494Smrg
3708424e9256Smrg	bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
3709a884aba1Smrg
371020131375Smrg	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
371120131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
371220131375Smrg	bufmgr_gem->has_wait_timeout = ret == 0;
371320131375Smrg
371420131375Smrg	gp.param = I915_PARAM_HAS_LLC;
371520131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
371620131375Smrg	if (ret != 0) {
371720131375Smrg		/* Kernel does not support the HAS_LLC query; fall back to
371820131375Smrg		 * GPU generation detection and assume we have LLC on GEN6/7.
371920131375Smrg		 */
372020131375Smrg		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
372120131375Smrg				       IS_GEN7(bufmgr_gem->pci_device));
372220131375Smrg	} else
372320131375Smrg		bufmgr_gem->has_llc = *gp.value;
372420131375Smrg
372520131375Smrg	gp.param = I915_PARAM_HAS_VEBOX;
372620131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
372720131375Smrg	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
372820131375Smrg
3729fe517fc9Smrg	gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
3730fe517fc9Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3731fe517fc9Smrg	if (ret == 0 && *gp.value > 0)
3732fe517fc9Smrg		bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
3733fe517fc9Smrg
373422944501Smrg	if (bufmgr_gem->gen < 4) {
373522944501Smrg		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
373622944501Smrg		gp.value = &bufmgr_gem->available_fences;
37376d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
373822944501Smrg		if (ret) {
373922944501Smrg			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
374022944501Smrg				errno);
374122944501Smrg			fprintf(stderr, "param: %d, val: %d\n", gp.param,
374222944501Smrg				*gp.value);
374322944501Smrg			bufmgr_gem->available_fences = 0;
374422944501Smrg		} else {
374522944501Smrg			/* XXX The kernel reports the total number of fences,
374622944501Smrg			 * including any that may be pinned.
374722944501Smrg			 *
374822944501Smrg			 * We presume that there will be at least one pinned
374922944501Smrg			 * fence for the scanout buffer, but there may be more
375022944501Smrg			 * than one scanout and the user may be manually
375122944501Smrg			 * pinning buffers. Let's move to execbuffer2 and
375222944501Smrg			 * thereby forget the insanity of using fences...
375322944501Smrg			 */
375422944501Smrg			bufmgr_gem->available_fences -= 2;
375522944501Smrg			if (bufmgr_gem->available_fences < 0)
375622944501Smrg				bufmgr_gem->available_fences = 0;
375722944501Smrg		}
375822944501Smrg	}
375922944501Smrg
3760fe517fc9Smrg	if (bufmgr_gem->gen >= 8) {
3761fe517fc9Smrg		gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
3762fe517fc9Smrg		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3763fe517fc9Smrg		if (ret == 0 && *gp.value == 3)
3764fe517fc9Smrg			bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
3765fe517fc9Smrg	}
3766fe517fc9Smrg
376722944501Smrg	/* Let's go with one relocation for every 2 dwords (but round down a bit
376822944501Smrg	 * since a power of two will mean an extra page allocation for the reloc
376922944501Smrg	 * buffer).
377022944501Smrg	 *
377122944501Smrg	 * Every 4 was too few for the blender benchmark.
377222944501Smrg	 */
377322944501Smrg	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
377422944501Smrg
377522944501Smrg	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
377622944501Smrg	bufmgr_gem->bufmgr.bo_alloc_for_render =
377722944501Smrg	    drm_intel_gem_bo_alloc_for_render;
377822944501Smrg	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
377922944501Smrg	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
378022944501Smrg	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
378122944501Smrg	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
378222944501Smrg	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
378322944501Smrg	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
378422944501Smrg	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
378522944501Smrg	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
378622944501Smrg	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
378722944501Smrg	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
378822944501Smrg	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
378922944501Smrg	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
379022944501Smrg	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
379122944501Smrg	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
379222944501Smrg	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
379322944501Smrg	/* Use the new one if available */
3794aaba2545Smrg	if (exec2) {
379522944501Smrg		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
37969ce4edccSmrg		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3797aaba2545Smrg	} else
379822944501Smrg		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
379922944501Smrg	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
380022944501Smrg	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3801a884aba1Smrg	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
380222944501Smrg	bufmgr_gem->bufmgr.debug = 0;
380322944501Smrg	bufmgr_gem->bufmgr.check_aperture_space =
380422944501Smrg	    drm_intel_gem_check_aperture_space;
380522944501Smrg	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3806aaba2545Smrg	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
380722944501Smrg	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
380822944501Smrg	    drm_intel_gem_get_pipe_from_crtc_id;
380922944501Smrg	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
381022944501Smrg
3811aaba2545Smrg	init_cache_buckets(bufmgr_gem);
381222944501Smrg
381320131375Smrg	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
381420131375Smrg	bufmgr_gem->vma_max = -1; /* unlimited by default */
381520131375Smrg
3816a884aba1Smrg	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3817a884aba1Smrg
3818a884aba1Smrgexit:
3819a884aba1Smrg	pthread_mutex_unlock(&bufmgr_list_mutex);
3820a884aba1Smrg
3821a884aba1Smrg	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
382222944501Smrg}
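
/*
 * Usage sketch (illustrative, compiled out): bringing the buffer manager up
 * on an already-opened DRM fd.  The 16 * 1024 batch size is an arbitrary
 * example value; it only sizes the relocation array computed above.
 */
#if 0
static drm_intel_bufmgr *
example_init(int fd)
{
	drm_intel_bufmgr *bufmgr;

	bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
	if (bufmgr != NULL)
		drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	return bufmgr;	/* release with drm_intel_bufmgr_destroy() */
}
#endif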