intel_bufmgr_gem.c revision a884aba1
122944501Smrg/**************************************************************************
222944501Smrg *
322944501Smrg * Copyright © 2007 Red Hat Inc.
420131375Smrg * Copyright © 2007-2012 Intel Corporation
522944501Smrg * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
622944501Smrg * All Rights Reserved.
722944501Smrg *
822944501Smrg * Permission is hereby granted, free of charge, to any person obtaining a
922944501Smrg * copy of this software and associated documentation files (the
1022944501Smrg * "Software"), to deal in the Software without restriction, including
1122944501Smrg * without limitation the rights to use, copy, modify, merge, publish,
1222944501Smrg * distribute, sub license, and/or sell copies of the Software, and to
1322944501Smrg * permit persons to whom the Software is furnished to do so, subject to
1422944501Smrg * the following conditions:
1522944501Smrg *
1622944501Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1722944501Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1822944501Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
1922944501Smrg * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
2022944501Smrg * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
2122944501Smrg * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
2222944501Smrg * USE OR OTHER DEALINGS IN THE SOFTWARE.
2322944501Smrg *
2422944501Smrg * The above copyright notice and this permission notice (including the
2522944501Smrg * next paragraph) shall be included in all copies or substantial portions
2622944501Smrg * of the Software.
2722944501Smrg *
2822944501Smrg *
2922944501Smrg **************************************************************************/
3022944501Smrg/*
3122944501Smrg * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
3222944501Smrg *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
3322944501Smrg *	    Eric Anholt <eric@anholt.net>
3422944501Smrg *	    Dave Airlie <airlied@linux.ie>
3522944501Smrg */
3622944501Smrg
3722944501Smrg#ifdef HAVE_CONFIG_H
3822944501Smrg#include "config.h"
3922944501Smrg#endif
4022944501Smrg
4122944501Smrg#include <xf86drm.h>
4222944501Smrg#include <xf86atomic.h>
4322944501Smrg#include <fcntl.h>
4422944501Smrg#include <stdio.h>
4522944501Smrg#include <stdlib.h>
4622944501Smrg#include <string.h>
4722944501Smrg#include <unistd.h>
4822944501Smrg#include <assert.h>
4922944501Smrg#include <pthread.h>
502e6867f6Smrg#include <stddef.h>
5122944501Smrg#include <sys/ioctl.h>
5222944501Smrg#include <sys/stat.h>
5322944501Smrg#include <sys/types.h>
5420131375Smrg#include <stdbool.h>
5522944501Smrg
5622944501Smrg#include <errno.h>
5720131375Smrg#ifndef ETIME
5820131375Smrg#define ETIME ETIMEDOUT
5920131375Smrg#endif
60a884aba1Smrg#include "libdrm.h"
6122944501Smrg#include "libdrm_lists.h"
6222944501Smrg#include "intel_bufmgr.h"
6322944501Smrg#include "intel_bufmgr_priv.h"
6422944501Smrg#include "intel_chipset.h"
6520131375Smrg#include "intel_aub.h"
6622944501Smrg#include "string.h"
6722944501Smrg
6822944501Smrg#include "i915_drm.h"
6922944501Smrg
7020131375Smrg#ifdef HAVE_VALGRIND
7120131375Smrg#include <valgrind.h>
7220131375Smrg#include <memcheck.h>
7320131375Smrg#define VG(x) x
7420131375Smrg#else
7520131375Smrg#define VG(x)
7620131375Smrg#endif
7720131375Smrg
7820131375Smrg#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
7920131375Smrg
8022944501Smrg#define DBG(...) do {					\
8122944501Smrg	if (bufmgr_gem->bufmgr.debug)			\
8222944501Smrg		fprintf(stderr, __VA_ARGS__);		\
8322944501Smrg} while (0)
8422944501Smrg
85aaba2545Smrg#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
86aaba2545Smrg
8722944501Smrgtypedef struct _drm_intel_bo_gem drm_intel_bo_gem;
8822944501Smrg
8922944501Smrgstruct drm_intel_gem_bo_bucket {
9022944501Smrg	drmMMListHead head;
9122944501Smrg	unsigned long size;
9222944501Smrg};
9322944501Smrg
9422944501Smrgtypedef struct _drm_intel_bufmgr_gem {
9522944501Smrg	drm_intel_bufmgr bufmgr;
9622944501Smrg
97a884aba1Smrg	atomic_t refcount;
98a884aba1Smrg
9922944501Smrg	int fd;
10022944501Smrg
10122944501Smrg	int max_relocs;
10222944501Smrg
10322944501Smrg	pthread_mutex_t lock;
10422944501Smrg
10522944501Smrg	struct drm_i915_gem_exec_object *exec_objects;
10622944501Smrg	struct drm_i915_gem_exec_object2 *exec2_objects;
10722944501Smrg	drm_intel_bo **exec_bos;
10822944501Smrg	int exec_size;
10922944501Smrg	int exec_count;
11022944501Smrg
11122944501Smrg	/** Array of lists of cached gem objects of power-of-two sizes */
112aaba2545Smrg	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
113aaba2545Smrg	int num_buckets;
1146d98c517Smrg	time_t time;
11522944501Smrg
116a884aba1Smrg	drmMMListHead managers;
117a884aba1Smrg
11820131375Smrg	drmMMListHead named;
11920131375Smrg	drmMMListHead vma_cache;
12020131375Smrg	int vma_count, vma_open, vma_max;
12120131375Smrg
12222944501Smrg	uint64_t gtt_size;
12322944501Smrg	int available_fences;
12422944501Smrg	int pci_device;
12522944501Smrg	int gen;
1269ce4edccSmrg	unsigned int has_bsd : 1;
1279ce4edccSmrg	unsigned int has_blt : 1;
1289ce4edccSmrg	unsigned int has_relaxed_fencing : 1;
12920131375Smrg	unsigned int has_llc : 1;
13020131375Smrg	unsigned int has_wait_timeout : 1;
1319ce4edccSmrg	unsigned int bo_reuse : 1;
13220131375Smrg	unsigned int no_exec : 1;
13320131375Smrg	unsigned int has_vebox : 1;
13420131375Smrg	bool fenced_relocs;
13520131375Smrg
13620131375Smrg	char *aub_filename;
13720131375Smrg	FILE *aub_file;
13820131375Smrg	uint32_t aub_offset;
13922944501Smrg} drm_intel_bufmgr_gem;
14022944501Smrg
14122944501Smrg#define DRM_INTEL_RELOC_FENCE (1<<0)
14222944501Smrg
14322944501Smrgtypedef struct _drm_intel_reloc_target_info {
14422944501Smrg	drm_intel_bo *bo;
14522944501Smrg	int flags;
14622944501Smrg} drm_intel_reloc_target;
14722944501Smrg
14822944501Smrgstruct _drm_intel_bo_gem {
14922944501Smrg	drm_intel_bo bo;
15022944501Smrg
15122944501Smrg	atomic_t refcount;
15222944501Smrg	uint32_t gem_handle;
15322944501Smrg	const char *name;
15422944501Smrg
15522944501Smrg	/**
15622944501Smrg	 * Kernel-assigned global name for this object
15720131375Smrg	 *
15820131375Smrg	 * List contains both flink-named and prime-fd'd objects
15922944501Smrg	 */
16022944501Smrg	unsigned int global_name;
16120131375Smrg	drmMMListHead name_list;
16222944501Smrg
16322944501Smrg	/**
16422944501Smrg	 * Index of the buffer within the validation list while preparing a
16522944501Smrg	 * batchbuffer execution.
16622944501Smrg	 */
16722944501Smrg	int validate_index;
16822944501Smrg
16922944501Smrg	/**
17022944501Smrg	 * Current tiling mode
17122944501Smrg	 */
17222944501Smrg	uint32_t tiling_mode;
17322944501Smrg	uint32_t swizzle_mode;
1746d98c517Smrg	unsigned long stride;
17522944501Smrg
17622944501Smrg	time_t free_time;
17722944501Smrg
17822944501Smrg	/** Array passed to the DRM containing relocation information. */
17922944501Smrg	struct drm_i915_gem_relocation_entry *relocs;
18022944501Smrg	/**
18122944501Smrg	 * Array of info structs corresponding to relocs[i].target_handle etc
18222944501Smrg	 */
18322944501Smrg	drm_intel_reloc_target *reloc_target_info;
18422944501Smrg	/** Number of entries in relocs */
18522944501Smrg	int reloc_count;
18622944501Smrg	/** Mapped address for the buffer, saved across map/unmap cycles */
18722944501Smrg	void *mem_virtual;
18822944501Smrg	/** GTT virtual address for the buffer, saved across map/unmap cycles */
18922944501Smrg	void *gtt_virtual;
190a884aba1Smrg	/**
191a884aba1Smrg	 * Virtual address of the buffer allocated by the user, used for userptr
192a884aba1Smrg	 * objects only.
193a884aba1Smrg	 */
194a884aba1Smrg	void *user_virtual;
19520131375Smrg	int map_count;
19620131375Smrg	drmMMListHead vma_list;
19722944501Smrg
19822944501Smrg	/** BO cache list */
19922944501Smrg	drmMMListHead head;
20022944501Smrg
20122944501Smrg	/**
20222944501Smrg	 * Boolean of whether this BO and its children have been included in
20322944501Smrg	 * the current drm_intel_bufmgr_check_aperture_space() total.
20422944501Smrg	 */
20520131375Smrg	bool included_in_check_aperture;
20622944501Smrg
20722944501Smrg	/**
20822944501Smrg	 * Boolean of whether this buffer has been used as a relocation
20922944501Smrg	 * target and had its size accounted for, and thus can't have any
21022944501Smrg	 * further relocations added to it.
21122944501Smrg	 */
21220131375Smrg	bool used_as_reloc_target;
21322944501Smrg
21422944501Smrg	/**
21522944501Smrg	 * Boolean of whether we have encountered an error whilst building the relocation tree.
21622944501Smrg	 */
21720131375Smrg	bool has_error;
21822944501Smrg
21922944501Smrg	/**
22022944501Smrg	 * Boolean of whether this buffer can be re-used
22122944501Smrg	 */
22220131375Smrg	bool reusable;
22320131375Smrg
22420131375Smrg	/**
22520131375Smrg	 * Boolean of whether the GPU is definitely not accessing the buffer.
22620131375Smrg	 *
22720131375Smrg	 * This is only valid when reusable, since non-reusable
22820131375Smrg	 * buffers are those that have been shared with other
22920131375Smrg	 * processes, so we don't know their state.
23020131375Smrg	 */
23120131375Smrg	bool idle;
23222944501Smrg
233a884aba1Smrg	/**
234a884aba1Smrg	 * Boolean of whether this buffer was allocated with userptr
235a884aba1Smrg	 */
236a884aba1Smrg	bool is_userptr;
237a884aba1Smrg
23822944501Smrg	/**
23922944501Smrg	 * Size in bytes of this buffer and its relocation descendants.
24022944501Smrg	 *
24122944501Smrg	 * Used to avoid costly tree walking in
24222944501Smrg	 * drm_intel_bufmgr_check_aperture in the common case.
24322944501Smrg	 */
24422944501Smrg	int reloc_tree_size;
24522944501Smrg
24622944501Smrg	/**
24722944501Smrg	 * Number of potential fence registers required by this buffer and its
24822944501Smrg	 * relocations.
24922944501Smrg	 */
25022944501Smrg	int reloc_tree_fences;
25120131375Smrg
25220131375Smrg	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
25320131375Smrg	bool mapped_cpu_write;
25420131375Smrg
25520131375Smrg	uint32_t aub_offset;
25620131375Smrg
25720131375Smrg	drm_intel_aub_annotation *aub_annotations;
25820131375Smrg	unsigned aub_annotation_count;
25922944501Smrg};
26022944501Smrg
26122944501Smrgstatic unsigned int
26222944501Smrgdrm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
26322944501Smrg
26422944501Smrgstatic unsigned int
26522944501Smrgdrm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
26622944501Smrg
26722944501Smrgstatic int
26822944501Smrgdrm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
26922944501Smrg			    uint32_t * swizzle_mode);
27022944501Smrg
27122944501Smrgstatic int
2726d98c517Smrgdrm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2736d98c517Smrg				     uint32_t tiling_mode,
2746d98c517Smrg				     uint32_t stride);
27522944501Smrg
27622944501Smrgstatic void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
27722944501Smrg						      time_t time);
27822944501Smrg
27922944501Smrgstatic void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
28022944501Smrg
28122944501Smrgstatic void drm_intel_gem_bo_free(drm_intel_bo *bo);
28222944501Smrg
28322944501Smrgstatic unsigned long
28422944501Smrgdrm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
28522944501Smrg			   uint32_t *tiling_mode)
28622944501Smrg{
28722944501Smrg	unsigned long min_size, max_size;
28822944501Smrg	unsigned long i;
28922944501Smrg
29022944501Smrg	if (*tiling_mode == I915_TILING_NONE)
29122944501Smrg		return size;
29222944501Smrg
29322944501Smrg	/* 965+ just need multiples of page size for tiling */
29422944501Smrg	if (bufmgr_gem->gen >= 4)
29522944501Smrg		return ROUND_UP_TO(size, 4096);
29622944501Smrg
29722944501Smrg	/* Older chips need powers of two, of at least 512k or 1M */
29822944501Smrg	if (bufmgr_gem->gen == 3) {
29922944501Smrg		min_size = 1024*1024;
30022944501Smrg		max_size = 128*1024*1024;
30122944501Smrg	} else {
30222944501Smrg		min_size = 512*1024;
30322944501Smrg		max_size = 64*1024*1024;
30422944501Smrg	}
30522944501Smrg
30622944501Smrg	if (size > max_size) {
30722944501Smrg		*tiling_mode = I915_TILING_NONE;
30822944501Smrg		return size;
30922944501Smrg	}
31022944501Smrg
3119ce4edccSmrg	/* Do we need to allocate every page for the fence? */
3129ce4edccSmrg	if (bufmgr_gem->has_relaxed_fencing)
3139ce4edccSmrg		return ROUND_UP_TO(size, 4096);
3149ce4edccSmrg
31522944501Smrg	for (i = min_size; i < size; i <<= 1)
31622944501Smrg		;
31722944501Smrg
31822944501Smrg	return i;
31922944501Smrg}
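
/*
 * Worked example of the rounding above (editorial sketch, not from the
 * original sources): on gen4+ a 601 KiB tiled request is simply rounded
 * up to the next page, 604 KiB.  On gen3 without relaxed fencing an
 * X-tiled 600 KiB request becomes 1 MiB (the minimum fence size) and a
 * 1.5 MiB request becomes 2 MiB (the next power of two).
 */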
32022944501Smrg
32122944501Smrg/*
32222944501Smrg * Round a given pitch up to the minimum required for X tiling on a
32322944501Smrg * given chip.  We use 512 as the minimum to allow for a later tiling
32422944501Smrg * change.
32522944501Smrg */
32622944501Smrgstatic unsigned long
32722944501Smrgdrm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
3286d98c517Smrg			    unsigned long pitch, uint32_t *tiling_mode)
32922944501Smrg{
33022944501Smrg	unsigned long tile_width;
33122944501Smrg	unsigned long i;
33222944501Smrg
33322944501Smrg	/* If untiled, then just align it so that we can do rendering
33422944501Smrg	 * to it with the 3D engine.
33522944501Smrg	 */
3366d98c517Smrg	if (*tiling_mode == I915_TILING_NONE)
33722944501Smrg		return ALIGN(pitch, 64);
33822944501Smrg
33920131375Smrg	if (*tiling_mode == I915_TILING_X
34020131375Smrg			|| (IS_915(bufmgr_gem->pci_device)
34120131375Smrg			    && *tiling_mode == I915_TILING_Y))
34222944501Smrg		tile_width = 512;
34322944501Smrg	else
34422944501Smrg		tile_width = 128;
34522944501Smrg
34622944501Smrg	/* 965 is flexible */
34722944501Smrg	if (bufmgr_gem->gen >= 4)
34822944501Smrg		return ROUND_UP_TO(pitch, tile_width);
34922944501Smrg
3506d98c517Smrg	/* The older hardware has a maximum pitch of 8192 with tiled
3516d98c517Smrg	 * surfaces, so fallback to untiled if it's too large.
3526d98c517Smrg	 */
3536d98c517Smrg	if (pitch > 8192) {
3546d98c517Smrg		*tiling_mode = I915_TILING_NONE;
3556d98c517Smrg		return ALIGN(pitch, 64);
3566d98c517Smrg	}
3576d98c517Smrg
35822944501Smrg	/* Pre-965 needs power of two tile width */
35922944501Smrg	for (i = tile_width; i < pitch; i <<= 1)
36022944501Smrg		;
36122944501Smrg
36222944501Smrg	return i;
36322944501Smrg}
36422944501Smrg
36522944501Smrgstatic struct drm_intel_gem_bo_bucket *
36622944501Smrgdrm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
36722944501Smrg				 unsigned long size)
36822944501Smrg{
36922944501Smrg	int i;
37022944501Smrg
371aaba2545Smrg	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
37222944501Smrg		struct drm_intel_gem_bo_bucket *bucket =
37322944501Smrg		    &bufmgr_gem->cache_bucket[i];
37422944501Smrg		if (bucket->size >= size) {
37522944501Smrg			return bucket;
37622944501Smrg		}
37722944501Smrg	}
37822944501Smrg
37922944501Smrg	return NULL;
38022944501Smrg}
38122944501Smrg
38222944501Smrgstatic void
38322944501Smrgdrm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
38422944501Smrg{
38522944501Smrg	int i, j;
38622944501Smrg
38722944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
38822944501Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
38922944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
39022944501Smrg
39122944501Smrg		if (bo_gem->relocs == NULL) {
39222944501Smrg			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
39322944501Smrg			    bo_gem->name);
39422944501Smrg			continue;
39522944501Smrg		}
39622944501Smrg
39722944501Smrg		for (j = 0; j < bo_gem->reloc_count; j++) {
39822944501Smrg			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
39922944501Smrg			drm_intel_bo_gem *target_gem =
40022944501Smrg			    (drm_intel_bo_gem *) target_bo;
40122944501Smrg
40222944501Smrg			DBG("%2d: %d (%s)@0x%08llx -> "
403d82d45b3Sjoerg			    "%d (%s)@0x%08llx + 0x%08x\n",
40422944501Smrg			    i,
40522944501Smrg			    bo_gem->gem_handle, bo_gem->name,
40622944501Smrg			    (unsigned long long)bo_gem->relocs[j].offset,
40722944501Smrg			    target_gem->gem_handle,
40822944501Smrg			    target_gem->name,
409d82d45b3Sjoerg			    (unsigned long long)target_bo->offset64,
41022944501Smrg			    bo_gem->relocs[j].delta);
41122944501Smrg		}
41222944501Smrg	}
41322944501Smrg}
41422944501Smrg
41522944501Smrgstatic inline void
41622944501Smrgdrm_intel_gem_bo_reference(drm_intel_bo *bo)
41722944501Smrg{
41822944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
41922944501Smrg
42022944501Smrg	atomic_inc(&bo_gem->refcount);
42122944501Smrg}
42222944501Smrg
42322944501Smrg/**
42422944501Smrg * Adds the given buffer to the list of buffers to be validated (moved into the
42522944501Smrg * appropriate memory type) with the next batch submission.
42622944501Smrg *
42722944501Smrg * If a buffer is validated multiple times in a batch submission, it ends up
42822944501Smrg * with the intersection of the memory type flags and the union of the
42922944501Smrg * access flags.
43022944501Smrg */
43122944501Smrgstatic void
43222944501Smrgdrm_intel_add_validate_buffer(drm_intel_bo *bo)
43322944501Smrg{
43422944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
43522944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
43622944501Smrg	int index;
43722944501Smrg
43822944501Smrg	if (bo_gem->validate_index != -1)
43922944501Smrg		return;
44022944501Smrg
44122944501Smrg	/* Extend the array of validation entries as necessary. */
44222944501Smrg	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
44322944501Smrg		int new_size = bufmgr_gem->exec_size * 2;
44422944501Smrg
44522944501Smrg		if (new_size == 0)
44622944501Smrg			new_size = 5;
44722944501Smrg
44822944501Smrg		bufmgr_gem->exec_objects =
44922944501Smrg		    realloc(bufmgr_gem->exec_objects,
45022944501Smrg			    sizeof(*bufmgr_gem->exec_objects) * new_size);
45122944501Smrg		bufmgr_gem->exec_bos =
45222944501Smrg		    realloc(bufmgr_gem->exec_bos,
45322944501Smrg			    sizeof(*bufmgr_gem->exec_bos) * new_size);
45422944501Smrg		bufmgr_gem->exec_size = new_size;
45522944501Smrg	}
45622944501Smrg
45722944501Smrg	index = bufmgr_gem->exec_count;
45822944501Smrg	bo_gem->validate_index = index;
45922944501Smrg	/* Fill in array entry */
46022944501Smrg	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
46122944501Smrg	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
46222944501Smrg	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
46322944501Smrg	bufmgr_gem->exec_objects[index].alignment = 0;
46422944501Smrg	bufmgr_gem->exec_objects[index].offset = 0;
46522944501Smrg	bufmgr_gem->exec_bos[index] = bo;
46622944501Smrg	bufmgr_gem->exec_count++;
46722944501Smrg}
46822944501Smrg
46922944501Smrgstatic void
47022944501Smrgdrm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
47122944501Smrg{
47222944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
47322944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
47422944501Smrg	int index;
47522944501Smrg
47622944501Smrg	if (bo_gem->validate_index != -1) {
47722944501Smrg		if (need_fence)
47822944501Smrg			bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
47922944501Smrg				EXEC_OBJECT_NEEDS_FENCE;
48022944501Smrg		return;
48122944501Smrg	}
48222944501Smrg
48322944501Smrg	/* Extend the array of validation entries as necessary. */
48422944501Smrg	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
48522944501Smrg		int new_size = bufmgr_gem->exec_size * 2;
48622944501Smrg
48722944501Smrg		if (new_size == 0)
48822944501Smrg			new_size = 5;
48922944501Smrg
49022944501Smrg		bufmgr_gem->exec2_objects =
49122944501Smrg			realloc(bufmgr_gem->exec2_objects,
49222944501Smrg				sizeof(*bufmgr_gem->exec2_objects) * new_size);
49322944501Smrg		bufmgr_gem->exec_bos =
49422944501Smrg			realloc(bufmgr_gem->exec_bos,
49522944501Smrg				sizeof(*bufmgr_gem->exec_bos) * new_size);
49622944501Smrg		bufmgr_gem->exec_size = new_size;
49722944501Smrg	}
49822944501Smrg
49922944501Smrg	index = bufmgr_gem->exec_count;
50022944501Smrg	bo_gem->validate_index = index;
50122944501Smrg	/* Fill in array entry */
50222944501Smrg	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
50322944501Smrg	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
50422944501Smrg	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
50522944501Smrg	bufmgr_gem->exec2_objects[index].alignment = 0;
50622944501Smrg	bufmgr_gem->exec2_objects[index].offset = 0;
50722944501Smrg	bufmgr_gem->exec_bos[index] = bo;
50822944501Smrg	bufmgr_gem->exec2_objects[index].flags = 0;
50922944501Smrg	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
51022944501Smrg	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
51122944501Smrg	if (need_fence) {
51222944501Smrg		bufmgr_gem->exec2_objects[index].flags |=
51322944501Smrg			EXEC_OBJECT_NEEDS_FENCE;
51422944501Smrg	}
51522944501Smrg	bufmgr_gem->exec_count++;
51622944501Smrg}
51722944501Smrg
51822944501Smrg#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
51922944501Smrg	sizeof(uint32_t))
52022944501Smrg
52122944501Smrgstatic void
52222944501Smrgdrm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
52322944501Smrg				      drm_intel_bo_gem *bo_gem)
52422944501Smrg{
52522944501Smrg	int size;
52622944501Smrg
52722944501Smrg	assert(!bo_gem->used_as_reloc_target);
52822944501Smrg
52922944501Smrg	/* The older chipsets are far less flexible in terms of tiling,
53022944501Smrg	 * and require tiled buffers to be size-aligned in the aperture.
53122944501Smrg	 * This means that in the worst possible case we will need a hole
53222944501Smrg	 * twice as large as the object in order for it to fit into the
53322944501Smrg	 * aperture. Optimal packing is for wimps.
53422944501Smrg	 */
53522944501Smrg	size = bo_gem->bo.size;
5369ce4edccSmrg	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
5379ce4edccSmrg		int min_size;
5389ce4edccSmrg
5399ce4edccSmrg		if (bufmgr_gem->has_relaxed_fencing) {
5409ce4edccSmrg			if (bufmgr_gem->gen == 3)
5419ce4edccSmrg				min_size = 1024*1024;
5429ce4edccSmrg			else
5439ce4edccSmrg				min_size = 512*1024;
5449ce4edccSmrg
5459ce4edccSmrg			while (min_size < size)
5469ce4edccSmrg				min_size *= 2;
5479ce4edccSmrg		} else
5489ce4edccSmrg			min_size = size;
5499ce4edccSmrg
5509ce4edccSmrg		/* Account for worst-case alignment. */
5519ce4edccSmrg		size = 2 * min_size;
5529ce4edccSmrg	}
55322944501Smrg
55422944501Smrg	bo_gem->reloc_tree_size = size;
55522944501Smrg}
55622944501Smrg
55722944501Smrgstatic int
55822944501Smrgdrm_intel_setup_reloc_list(drm_intel_bo *bo)
55922944501Smrg{
56022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
56122944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
56222944501Smrg	unsigned int max_relocs = bufmgr_gem->max_relocs;
56322944501Smrg
56422944501Smrg	if (bo->size / 4 < max_relocs)
56522944501Smrg		max_relocs = bo->size / 4;
56622944501Smrg
56722944501Smrg	bo_gem->relocs = malloc(max_relocs *
56822944501Smrg				sizeof(struct drm_i915_gem_relocation_entry));
56922944501Smrg	bo_gem->reloc_target_info = malloc(max_relocs *
570aaba2545Smrg					   sizeof(drm_intel_reloc_target));
57122944501Smrg	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
57220131375Smrg		bo_gem->has_error = true;
57322944501Smrg
57422944501Smrg		free (bo_gem->relocs);
57522944501Smrg		bo_gem->relocs = NULL;
57622944501Smrg
57722944501Smrg		free (bo_gem->reloc_target_info);
57822944501Smrg		bo_gem->reloc_target_info = NULL;
57922944501Smrg
58022944501Smrg		return 1;
58122944501Smrg	}
58222944501Smrg
58322944501Smrg	return 0;
58422944501Smrg}
58522944501Smrg
58622944501Smrgstatic int
58722944501Smrgdrm_intel_gem_bo_busy(drm_intel_bo *bo)
58822944501Smrg{
58922944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
59022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
59122944501Smrg	struct drm_i915_gem_busy busy;
59222944501Smrg	int ret;
59322944501Smrg
59420131375Smrg	if (bo_gem->reusable && bo_gem->idle)
59520131375Smrg		return false;
59620131375Smrg
59720131375Smrg	VG_CLEAR(busy);
59822944501Smrg	busy.handle = bo_gem->gem_handle;
59922944501Smrg
6006d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
60120131375Smrg	if (ret == 0) {
60220131375Smrg		bo_gem->idle = !busy.busy;
60320131375Smrg		return busy.busy;
60420131375Smrg	} else {
60520131375Smrg		return false;
60620131375Smrg	}
60822944501Smrg}
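
/*
 * Caller-side sketch (assumed usage, not part of this file): the busy
 * query is typically used to decide whether touching a buffer would
 * stall on the GPU, e.g.
 *
 *	if (!drm_intel_bo_busy(bo))
 *		drm_intel_bo_map(bo, 1);	// map won't stall on the GPU
 */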
60922944501Smrg
61022944501Smrgstatic int
61122944501Smrgdrm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
61222944501Smrg				  drm_intel_bo_gem *bo_gem, int state)
61322944501Smrg{
61422944501Smrg	struct drm_i915_gem_madvise madv;
61522944501Smrg
61620131375Smrg	VG_CLEAR(madv);
61722944501Smrg	madv.handle = bo_gem->gem_handle;
61822944501Smrg	madv.madv = state;
61922944501Smrg	madv.retained = 1;
6206d98c517Smrg	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
62122944501Smrg
62222944501Smrg	return madv.retained;
62322944501Smrg}
62422944501Smrg
62522944501Smrgstatic int
62622944501Smrgdrm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
62722944501Smrg{
62822944501Smrg	return drm_intel_gem_bo_madvise_internal
62922944501Smrg		((drm_intel_bufmgr_gem *) bo->bufmgr,
63022944501Smrg		 (drm_intel_bo_gem *) bo,
63122944501Smrg		 madv);
63222944501Smrg}
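
/*
 * Caller-side sketch (assumed usage, not part of this file): a cache
 * owner can mark an idle buffer purgeable and later check whether the
 * kernel kept its pages, e.g.
 *
 *	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
 *	...
 *	if (drm_intel_bo_madvise(bo, I915_MADV_WILLNEED) == 0)
 *		handle_purged_contents(bo);	// hypothetical helper
 */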
63322944501Smrg
63422944501Smrg/* drop the oldest entries that have been purged by the kernel */
63522944501Smrgstatic void
63622944501Smrgdrm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
63722944501Smrg				    struct drm_intel_gem_bo_bucket *bucket)
63822944501Smrg{
63922944501Smrg	while (!DRMLISTEMPTY(&bucket->head)) {
64022944501Smrg		drm_intel_bo_gem *bo_gem;
64122944501Smrg
64222944501Smrg		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
64322944501Smrg				      bucket->head.next, head);
64422944501Smrg		if (drm_intel_gem_bo_madvise_internal
64522944501Smrg		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
64622944501Smrg			break;
64722944501Smrg
64822944501Smrg		DRMLISTDEL(&bo_gem->head);
64922944501Smrg		drm_intel_gem_bo_free(&bo_gem->bo);
65022944501Smrg	}
65122944501Smrg}
65222944501Smrg
65322944501Smrgstatic drm_intel_bo *
65422944501Smrgdrm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
65522944501Smrg				const char *name,
65622944501Smrg				unsigned long size,
6576d98c517Smrg				unsigned long flags,
6586d98c517Smrg				uint32_t tiling_mode,
6596d98c517Smrg				unsigned long stride)
66022944501Smrg{
66122944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
66222944501Smrg	drm_intel_bo_gem *bo_gem;
66322944501Smrg	unsigned int page_size = getpagesize();
66422944501Smrg	int ret;
66522944501Smrg	struct drm_intel_gem_bo_bucket *bucket;
66620131375Smrg	bool alloc_from_cache;
66722944501Smrg	unsigned long bo_size;
66820131375Smrg	bool for_render = false;
66922944501Smrg
67022944501Smrg	if (flags & BO_ALLOC_FOR_RENDER)
67120131375Smrg		for_render = true;
67222944501Smrg
67322944501Smrg	/* Round the allocated size up to the nearest cache bucket size. */
67422944501Smrg	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
67522944501Smrg
67622944501Smrg	/* If we don't have caching at this size, don't actually round the
67722944501Smrg	 * allocation up.
67822944501Smrg	 */
67922944501Smrg	if (bucket == NULL) {
68022944501Smrg		bo_size = size;
68122944501Smrg		if (bo_size < page_size)
68222944501Smrg			bo_size = page_size;
68322944501Smrg	} else {
68422944501Smrg		bo_size = bucket->size;
68522944501Smrg	}
68622944501Smrg
68722944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
68822944501Smrg	/* Get a buffer out of the cache if available */
68922944501Smrgretry:
69020131375Smrg	alloc_from_cache = false;
69122944501Smrg	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
69222944501Smrg		if (for_render) {
69322944501Smrg			/* Allocate new render-target BOs from the tail (MRU)
69422944501Smrg			 * of the list, as it will likely be hot in the GPU
69522944501Smrg			 * cache and in the aperture for us.
69622944501Smrg			 */
69722944501Smrg			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
69822944501Smrg					      bucket->head.prev, head);
69922944501Smrg			DRMLISTDEL(&bo_gem->head);
70020131375Smrg			alloc_from_cache = true;
70122944501Smrg		} else {
70222944501Smrg			/* For non-render-target BOs (where we're probably
70322944501Smrg			 * going to map it first thing in order to fill it
70422944501Smrg			 * with data), check if the last BO in the cache is
70522944501Smrg			 * unbusy, and only reuse in that case. Otherwise,
70622944501Smrg			 * allocating a new buffer is probably faster than
70722944501Smrg			 * waiting for the GPU to finish.
70822944501Smrg			 */
70922944501Smrg			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
71022944501Smrg					      bucket->head.next, head);
71122944501Smrg			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
71220131375Smrg				alloc_from_cache = true;
71322944501Smrg				DRMLISTDEL(&bo_gem->head);
71422944501Smrg			}
71522944501Smrg		}
71622944501Smrg
71722944501Smrg		if (alloc_from_cache) {
71822944501Smrg			if (!drm_intel_gem_bo_madvise_internal
71922944501Smrg			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
72022944501Smrg				drm_intel_gem_bo_free(&bo_gem->bo);
72122944501Smrg				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
72222944501Smrg								    bucket);
72322944501Smrg				goto retry;
72422944501Smrg			}
7256d98c517Smrg
7266d98c517Smrg			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
7276d98c517Smrg								 tiling_mode,
7286d98c517Smrg								 stride)) {
7296d98c517Smrg				drm_intel_gem_bo_free(&bo_gem->bo);
7306d98c517Smrg				goto retry;
7316d98c517Smrg			}
73222944501Smrg		}
73322944501Smrg	}
73422944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
73522944501Smrg
73622944501Smrg	if (!alloc_from_cache) {
73722944501Smrg		struct drm_i915_gem_create create;
73822944501Smrg
73922944501Smrg		bo_gem = calloc(1, sizeof(*bo_gem));
74022944501Smrg		if (!bo_gem)
74122944501Smrg			return NULL;
74222944501Smrg
74322944501Smrg		bo_gem->bo.size = bo_size;
74420131375Smrg
74520131375Smrg		VG_CLEAR(create);
74622944501Smrg		create.size = bo_size;
74722944501Smrg
7486d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd,
7496d98c517Smrg			       DRM_IOCTL_I915_GEM_CREATE,
7506d98c517Smrg			       &create);
75122944501Smrg		bo_gem->gem_handle = create.handle;
75222944501Smrg		bo_gem->bo.handle = bo_gem->gem_handle;
75322944501Smrg		if (ret != 0) {
75422944501Smrg			free(bo_gem);
75522944501Smrg			return NULL;
75622944501Smrg		}
75722944501Smrg		bo_gem->bo.bufmgr = bufmgr;
7586d98c517Smrg
7596d98c517Smrg		bo_gem->tiling_mode = I915_TILING_NONE;
7606d98c517Smrg		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
7616d98c517Smrg		bo_gem->stride = 0;
7626d98c517Smrg
7636d98c517Smrg		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
7646d98c517Smrg							 tiling_mode,
7656d98c517Smrg							 stride)) {
7666d98c517Smrg		    drm_intel_gem_bo_free(&bo_gem->bo);
7676d98c517Smrg		    return NULL;
7686d98c517Smrg		}
76920131375Smrg
77020131375Smrg		DRMINITLISTHEAD(&bo_gem->name_list);
77120131375Smrg		DRMINITLISTHEAD(&bo_gem->vma_list);
77222944501Smrg	}
77322944501Smrg
77422944501Smrg	bo_gem->name = name;
77522944501Smrg	atomic_set(&bo_gem->refcount, 1);
77622944501Smrg	bo_gem->validate_index = -1;
77722944501Smrg	bo_gem->reloc_tree_fences = 0;
77820131375Smrg	bo_gem->used_as_reloc_target = false;
77920131375Smrg	bo_gem->has_error = false;
78020131375Smrg	bo_gem->reusable = true;
78120131375Smrg	bo_gem->aub_annotations = NULL;
78220131375Smrg	bo_gem->aub_annotation_count = 0;
78322944501Smrg
78422944501Smrg	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
78522944501Smrg
78622944501Smrg	DBG("bo_create: buf %d (%s) %ldb\n",
78722944501Smrg	    bo_gem->gem_handle, bo_gem->name, size);
78822944501Smrg
78922944501Smrg	return &bo_gem->bo;
79022944501Smrg}
79122944501Smrg
79222944501Smrgstatic drm_intel_bo *
79322944501Smrgdrm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
79422944501Smrg				  const char *name,
79522944501Smrg				  unsigned long size,
79622944501Smrg				  unsigned int alignment)
79722944501Smrg{
79822944501Smrg	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
7996d98c517Smrg					       BO_ALLOC_FOR_RENDER,
8006d98c517Smrg					       I915_TILING_NONE, 0);
80122944501Smrg}
80222944501Smrg
80322944501Smrgstatic drm_intel_bo *
80422944501Smrgdrm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
80522944501Smrg		       const char *name,
80622944501Smrg		       unsigned long size,
80722944501Smrg		       unsigned int alignment)
80822944501Smrg{
8096d98c517Smrg	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
8106d98c517Smrg					       I915_TILING_NONE, 0);
81122944501Smrg}
81222944501Smrg
81322944501Smrgstatic drm_intel_bo *
81422944501Smrgdrm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
81522944501Smrg			     int x, int y, int cpp, uint32_t *tiling_mode,
81622944501Smrg			     unsigned long *pitch, unsigned long flags)
81722944501Smrg{
81822944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
819aaba2545Smrg	unsigned long size, stride;
820aaba2545Smrg	uint32_t tiling;
82122944501Smrg
822aaba2545Smrg	do {
82320131375Smrg		unsigned long aligned_y, height_alignment;
824aaba2545Smrg
825aaba2545Smrg		tiling = *tiling_mode;
826aaba2545Smrg
827aaba2545Smrg		/* If we're tiled, our allocations are in 8 or 32-row blocks,
828aaba2545Smrg		 * so failure to align our height means that we won't allocate
829aaba2545Smrg		 * enough pages.
830aaba2545Smrg		 *
831aaba2545Smrg		 * If we're untiled, we still have to align to 2 rows high
832aaba2545Smrg		 * because the data port accesses 2x2 blocks even if the
833aaba2545Smrg		 * bottom row isn't to be rendered, so failure to align means
834aaba2545Smrg		 * we could walk off the end of the GTT and fault.  This is
835aaba2545Smrg		 * documented on 965, and may be the case on older chipsets
836aaba2545Smrg		 * too so we try to be careful.
837aaba2545Smrg		 */
838aaba2545Smrg		aligned_y = y;
83920131375Smrg		height_alignment = 2;
84020131375Smrg
84120131375Smrg		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
84220131375Smrg			height_alignment = 16;
84320131375Smrg		else if (tiling == I915_TILING_X
84420131375Smrg			|| (IS_915(bufmgr_gem->pci_device)
84520131375Smrg			    && tiling == I915_TILING_Y))
84620131375Smrg			height_alignment = 8;
847aaba2545Smrg		else if (tiling == I915_TILING_Y)
84820131375Smrg			height_alignment = 32;
84920131375Smrg		aligned_y = ALIGN(y, height_alignment);
850aaba2545Smrg
851aaba2545Smrg		stride = x * cpp;
8526d98c517Smrg		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
853aaba2545Smrg		size = stride * aligned_y;
854aaba2545Smrg		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
855aaba2545Smrg	} while (*tiling_mode != tiling);
85622944501Smrg	*pitch = stride;
85722944501Smrg
8586d98c517Smrg	if (tiling == I915_TILING_NONE)
8596d98c517Smrg		stride = 0;
8606d98c517Smrg
8616d98c517Smrg	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
8626d98c517Smrg					       tiling, stride);
86322944501Smrg}
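
/*
 * Caller-side sketch (assumed usage, not part of this file): the tiling
 * mode is an in/out parameter and may be downgraded to I915_TILING_NONE
 * if the request cannot be tiled; the chosen pitch is returned, e.g. for
 * a 1024x768 4-byte-per-pixel surface:
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo =
 *		drm_intel_bo_alloc_tiled(bufmgr, "surface", 1024, 768, 4,
 *					 &tiling, &pitch, 0);
 */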
86422944501Smrg
865a884aba1Smrgstatic drm_intel_bo *
866a884aba1Smrgdrm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
867a884aba1Smrg				const char *name,
868a884aba1Smrg				void *addr,
869a884aba1Smrg				uint32_t tiling_mode,
870a884aba1Smrg				uint32_t stride,
871a884aba1Smrg				unsigned long size,
872a884aba1Smrg				unsigned long flags)
873a884aba1Smrg{
874a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
875a884aba1Smrg	drm_intel_bo_gem *bo_gem;
876a884aba1Smrg	int ret;
877a884aba1Smrg	struct drm_i915_gem_userptr userptr;
878a884aba1Smrg
879a884aba1Smrg	/* Tiling with userptr surfaces is not supported
880a884aba1Smrg	 * on all hardware, so refuse it for the time being.
881a884aba1Smrg	 */
882a884aba1Smrg	if (tiling_mode != I915_TILING_NONE)
883a884aba1Smrg		return NULL;
884a884aba1Smrg
885a884aba1Smrg	bo_gem = calloc(1, sizeof(*bo_gem));
886a884aba1Smrg	if (!bo_gem)
887a884aba1Smrg		return NULL;
888a884aba1Smrg
889a884aba1Smrg	bo_gem->bo.size = size;
890a884aba1Smrg
891a884aba1Smrg	VG_CLEAR(userptr);
892a884aba1Smrg	userptr.user_ptr = (__u64)((unsigned long)addr);
893a884aba1Smrg	userptr.user_size = size;
894a884aba1Smrg	userptr.flags = flags;
895a884aba1Smrg
896a884aba1Smrg	ret = drmIoctl(bufmgr_gem->fd,
897a884aba1Smrg			DRM_IOCTL_I915_GEM_USERPTR,
898a884aba1Smrg			&userptr);
899a884aba1Smrg	if (ret != 0) {
900a884aba1Smrg		DBG("bo_create_userptr: "
901a884aba1Smrg		    "ioctl failed with user ptr %p size 0x%lx, "
902a884aba1Smrg		    "user flags 0x%lx\n", addr, size, flags);
903a884aba1Smrg		free(bo_gem);
904a884aba1Smrg		return NULL;
905a884aba1Smrg	}
906a884aba1Smrg
907a884aba1Smrg	bo_gem->gem_handle = userptr.handle;
908a884aba1Smrg	bo_gem->bo.handle = bo_gem->gem_handle;
909a884aba1Smrg	bo_gem->bo.bufmgr    = bufmgr;
910a884aba1Smrg	bo_gem->is_userptr   = true;
911a884aba1Smrg	bo_gem->bo.virtual   = addr;
912a884aba1Smrg	/* Save the address provided by user */
913a884aba1Smrg	bo_gem->user_virtual = addr;
914a884aba1Smrg	bo_gem->tiling_mode  = I915_TILING_NONE;
915a884aba1Smrg	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
916a884aba1Smrg	bo_gem->stride       = 0;
917a884aba1Smrg
918a884aba1Smrg	DRMINITLISTHEAD(&bo_gem->name_list);
919a884aba1Smrg	DRMINITLISTHEAD(&bo_gem->vma_list);
920a884aba1Smrg
921a884aba1Smrg	bo_gem->name = name;
922a884aba1Smrg	atomic_set(&bo_gem->refcount, 1);
923a884aba1Smrg	bo_gem->validate_index = -1;
924a884aba1Smrg	bo_gem->reloc_tree_fences = 0;
925a884aba1Smrg	bo_gem->used_as_reloc_target = false;
926a884aba1Smrg	bo_gem->has_error = false;
927a884aba1Smrg	bo_gem->reusable = false;
928a884aba1Smrg
929a884aba1Smrg	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
930a884aba1Smrg
931a884aba1Smrg	DBG("bo_create_userptr: "
932a884aba1Smrg	    "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
933a884aba1Smrg		addr, bo_gem->gem_handle, bo_gem->name,
934a884aba1Smrg		size, stride, tiling_mode);
935a884aba1Smrg
936a884aba1Smrg	return &bo_gem->bo;
937a884aba1Smrg}
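
/*
 * Caller-side sketch (assumed usage, not part of this file): userptr
 * objects wrap memory the application already owns; the address and size
 * are expected to be page aligned, e.g.
 *
 *	void *addr;
 *	if (posix_memalign(&addr, 4096, 2 * 4096) == 0) {
 *		drm_intel_bo *bo =
 *			drm_intel_bo_alloc_userptr(bufmgr, "wrapped", addr,
 *						   I915_TILING_NONE, 0,
 *						   2 * 4096, 0);
 *	}
 */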
938a884aba1Smrg
93922944501Smrg/**
94022944501Smrg * Returns a drm_intel_bo wrapping the given buffer object handle.
94122944501Smrg *
94222944501Smrg * This can be used when one application needs to pass a buffer object
94322944501Smrg * to another.
94422944501Smrg */
945a884aba1Smrgdrm_public drm_intel_bo *
94622944501Smrgdrm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
94722944501Smrg				  const char *name,
94822944501Smrg				  unsigned int handle)
94922944501Smrg{
95022944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
95122944501Smrg	drm_intel_bo_gem *bo_gem;
95222944501Smrg	int ret;
95322944501Smrg	struct drm_gem_open open_arg;
95422944501Smrg	struct drm_i915_gem_get_tiling get_tiling;
95520131375Smrg	drmMMListHead *list;
95622944501Smrg
95720131375Smrg	/* At the moment most applications only have a few named bos.
95820131375Smrg	 * For instance, in a DRI client only the render buffers passed
95920131375Smrg	 * between X and the client are named. And since X returns the
96020131375Smrg	 * alternating names for the front/back buffer a linear search
96120131375Smrg	 * provides a sufficiently fast match.
96220131375Smrg	 */
963a884aba1Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
96420131375Smrg	for (list = bufmgr_gem->named.next;
96520131375Smrg	     list != &bufmgr_gem->named;
96620131375Smrg	     list = list->next) {
96720131375Smrg		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
96820131375Smrg		if (bo_gem->global_name == handle) {
96920131375Smrg			drm_intel_gem_bo_reference(&bo_gem->bo);
970a884aba1Smrg			pthread_mutex_unlock(&bufmgr_gem->lock);
97120131375Smrg			return &bo_gem->bo;
97220131375Smrg		}
97320131375Smrg	}
97422944501Smrg
97520131375Smrg	VG_CLEAR(open_arg);
97622944501Smrg	open_arg.name = handle;
9776d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
9786d98c517Smrg		       DRM_IOCTL_GEM_OPEN,
9796d98c517Smrg		       &open_arg);
98022944501Smrg	if (ret != 0) {
9819ce4edccSmrg		DBG("Couldn't reference %s handle 0x%08x: %s\n",
9829ce4edccSmrg		    name, handle, strerror(errno));
983a884aba1Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
98422944501Smrg		return NULL;
98522944501Smrg	}
98620131375Smrg	/* Now see if someone has used a prime handle to get this
98720131375Smrg	 * object from the kernel before by looking through the list
98820131375Smrg	 * again for a matching gem_handle.
98920131375Smrg	 */
99020131375Smrg	for (list = bufmgr_gem->named.next;
99120131375Smrg	     list != &bufmgr_gem->named;
99220131375Smrg	     list = list->next) {
99320131375Smrg		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
99420131375Smrg		if (bo_gem->gem_handle == open_arg.handle) {
99520131375Smrg			drm_intel_gem_bo_reference(&bo_gem->bo);
996a884aba1Smrg			pthread_mutex_unlock(&bufmgr_gem->lock);
99720131375Smrg			return &bo_gem->bo;
99820131375Smrg		}
99920131375Smrg	}
100020131375Smrg
100120131375Smrg	bo_gem = calloc(1, sizeof(*bo_gem));
1002a884aba1Smrg	if (!bo_gem) {
1003a884aba1Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
100420131375Smrg		return NULL;
1005a884aba1Smrg	}
100620131375Smrg
100722944501Smrg	bo_gem->bo.size = open_arg.size;
100822944501Smrg	bo_gem->bo.offset = 0;
100920131375Smrg	bo_gem->bo.offset64 = 0;
101022944501Smrg	bo_gem->bo.virtual = NULL;
101122944501Smrg	bo_gem->bo.bufmgr = bufmgr;
101222944501Smrg	bo_gem->name = name;
101322944501Smrg	atomic_set(&bo_gem->refcount, 1);
101422944501Smrg	bo_gem->validate_index = -1;
101522944501Smrg	bo_gem->gem_handle = open_arg.handle;
101620131375Smrg	bo_gem->bo.handle = open_arg.handle;
101722944501Smrg	bo_gem->global_name = handle;
101820131375Smrg	bo_gem->reusable = false;
101922944501Smrg
102020131375Smrg	VG_CLEAR(get_tiling);
102122944501Smrg	get_tiling.handle = bo_gem->gem_handle;
10226d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
10236d98c517Smrg		       DRM_IOCTL_I915_GEM_GET_TILING,
10246d98c517Smrg		       &get_tiling);
102522944501Smrg	if (ret != 0) {
102622944501Smrg		drm_intel_gem_bo_unreference(&bo_gem->bo);
1027a884aba1Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
102822944501Smrg		return NULL;
102922944501Smrg	}
103022944501Smrg	bo_gem->tiling_mode = get_tiling.tiling_mode;
103122944501Smrg	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
10326d98c517Smrg	/* XXX stride is unknown */
103322944501Smrg	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
103422944501Smrg
103520131375Smrg	DRMINITLISTHEAD(&bo_gem->vma_list);
103620131375Smrg	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
1037a884aba1Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
103822944501Smrg	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
103922944501Smrg
104022944501Smrg	return &bo_gem->bo;
104122944501Smrg}
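
/*
 * Caller-side sketch of the flink hand-off described above (assumed
 * usage, not part of this file):
 *
 *	// exporting process
 *	uint32_t name;
 *	drm_intel_bo_flink(bo, &name);	// publish a global name
 *
 *	// importing process (name received over some IPC channel)
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */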
104222944501Smrg
104322944501Smrgstatic void
104422944501Smrgdrm_intel_gem_bo_free(drm_intel_bo *bo)
104522944501Smrg{
104622944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
104722944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
104822944501Smrg	struct drm_gem_close close;
104922944501Smrg	int ret;
105022944501Smrg
105120131375Smrg	DRMLISTDEL(&bo_gem->vma_list);
105220131375Smrg	if (bo_gem->mem_virtual) {
105320131375Smrg		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
1054a884aba1Smrg		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
105520131375Smrg		bufmgr_gem->vma_count--;
105620131375Smrg	}
105720131375Smrg	if (bo_gem->gtt_virtual) {
1058a884aba1Smrg		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
105920131375Smrg		bufmgr_gem->vma_count--;
106020131375Smrg	}
106122944501Smrg
106222944501Smrg	/* Close this object */
106320131375Smrg	VG_CLEAR(close);
106422944501Smrg	close.handle = bo_gem->gem_handle;
10656d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
106622944501Smrg	if (ret != 0) {
10679ce4edccSmrg		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
10689ce4edccSmrg		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
106922944501Smrg	}
107020131375Smrg	free(bo_gem->aub_annotations);
107122944501Smrg	free(bo);
107222944501Smrg}
107322944501Smrg
107420131375Smrgstatic void
107520131375Smrgdrm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
107620131375Smrg{
107720131375Smrg#if HAVE_VALGRIND
107820131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
107920131375Smrg
108020131375Smrg	if (bo_gem->mem_virtual)
108120131375Smrg		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
108220131375Smrg
108320131375Smrg	if (bo_gem->gtt_virtual)
108420131375Smrg		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
108520131375Smrg#endif
108620131375Smrg}
108720131375Smrg
108822944501Smrg/** Frees all cached buffers significantly older than @time. */
108922944501Smrgstatic void
109022944501Smrgdrm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
109122944501Smrg{
109222944501Smrg	int i;
109322944501Smrg
10946d98c517Smrg	if (bufmgr_gem->time == time)
10956d98c517Smrg		return;
10966d98c517Smrg
1097aaba2545Smrg	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
109822944501Smrg		struct drm_intel_gem_bo_bucket *bucket =
109922944501Smrg		    &bufmgr_gem->cache_bucket[i];
110022944501Smrg
110122944501Smrg		while (!DRMLISTEMPTY(&bucket->head)) {
110222944501Smrg			drm_intel_bo_gem *bo_gem;
110322944501Smrg
110422944501Smrg			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
110522944501Smrg					      bucket->head.next, head);
110622944501Smrg			if (time - bo_gem->free_time <= 1)
110722944501Smrg				break;
110822944501Smrg
110922944501Smrg			DRMLISTDEL(&bo_gem->head);
111022944501Smrg
111122944501Smrg			drm_intel_gem_bo_free(&bo_gem->bo);
111222944501Smrg		}
111322944501Smrg	}
11146d98c517Smrg
11156d98c517Smrg	bufmgr_gem->time = time;
111622944501Smrg}
111722944501Smrg
111820131375Smrgstatic void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
111920131375Smrg{
112020131375Smrg	int limit;
112120131375Smrg
112220131375Smrg	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
112320131375Smrg	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
112420131375Smrg
112520131375Smrg	if (bufmgr_gem->vma_max < 0)
112620131375Smrg		return;
112720131375Smrg
112820131375Smrg	/* We may need to evict a few entries in order to create new mmaps */
112920131375Smrg	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
113020131375Smrg	if (limit < 0)
113120131375Smrg		limit = 0;
113220131375Smrg
113320131375Smrg	while (bufmgr_gem->vma_count > limit) {
113420131375Smrg		drm_intel_bo_gem *bo_gem;
113520131375Smrg
113620131375Smrg		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
113720131375Smrg				      bufmgr_gem->vma_cache.next,
113820131375Smrg				      vma_list);
113920131375Smrg		assert(bo_gem->map_count == 0);
114020131375Smrg		DRMLISTDELINIT(&bo_gem->vma_list);
114120131375Smrg
114220131375Smrg		if (bo_gem->mem_virtual) {
1143a884aba1Smrg			drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
114420131375Smrg			bo_gem->mem_virtual = NULL;
114520131375Smrg			bufmgr_gem->vma_count--;
114620131375Smrg		}
114720131375Smrg		if (bo_gem->gtt_virtual) {
1148a884aba1Smrg			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
114920131375Smrg			bo_gem->gtt_virtual = NULL;
115020131375Smrg			bufmgr_gem->vma_count--;
115120131375Smrg		}
115220131375Smrg	}
115320131375Smrg}
115420131375Smrg
115520131375Smrgstatic void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
115620131375Smrg				       drm_intel_bo_gem *bo_gem)
115720131375Smrg{
115820131375Smrg	bufmgr_gem->vma_open--;
115920131375Smrg	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
116020131375Smrg	if (bo_gem->mem_virtual)
116120131375Smrg		bufmgr_gem->vma_count++;
116220131375Smrg	if (bo_gem->gtt_virtual)
116320131375Smrg		bufmgr_gem->vma_count++;
116420131375Smrg	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
116520131375Smrg}
116620131375Smrg
116720131375Smrgstatic void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
116820131375Smrg				      drm_intel_bo_gem *bo_gem)
116920131375Smrg{
117020131375Smrg	bufmgr_gem->vma_open++;
117120131375Smrg	DRMLISTDEL(&bo_gem->vma_list);
117220131375Smrg	if (bo_gem->mem_virtual)
117320131375Smrg		bufmgr_gem->vma_count--;
117420131375Smrg	if (bo_gem->gtt_virtual)
117520131375Smrg		bufmgr_gem->vma_count--;
117620131375Smrg	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
117720131375Smrg}
117820131375Smrg
117922944501Smrgstatic void
118022944501Smrgdrm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
118122944501Smrg{
118222944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
118322944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
118422944501Smrg	struct drm_intel_gem_bo_bucket *bucket;
118522944501Smrg	int i;
118622944501Smrg
118722944501Smrg	/* Unreference all the target buffers */
118822944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++) {
1189aaba2545Smrg		if (bo_gem->reloc_target_info[i].bo != bo) {
1190aaba2545Smrg			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1191aaba2545Smrg								  reloc_target_info[i].bo,
1192aaba2545Smrg								  time);
1193aaba2545Smrg		}
119422944501Smrg	}
119522944501Smrg	bo_gem->reloc_count = 0;
119620131375Smrg	bo_gem->used_as_reloc_target = false;
119722944501Smrg
119822944501Smrg	DBG("bo_unreference final: %d (%s)\n",
119922944501Smrg	    bo_gem->gem_handle, bo_gem->name);
120022944501Smrg
120122944501Smrg	/* release memory associated with this object */
120222944501Smrg	if (bo_gem->reloc_target_info) {
120322944501Smrg		free(bo_gem->reloc_target_info);
120422944501Smrg		bo_gem->reloc_target_info = NULL;
120522944501Smrg	}
120622944501Smrg	if (bo_gem->relocs) {
120722944501Smrg		free(bo_gem->relocs);
120822944501Smrg		bo_gem->relocs = NULL;
120922944501Smrg	}
121022944501Smrg
121120131375Smrg	/* Clear any left-over mappings */
121220131375Smrg	if (bo_gem->map_count) {
121320131375Smrg		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
121420131375Smrg		bo_gem->map_count = 0;
121520131375Smrg		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
121620131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
121720131375Smrg	}
121820131375Smrg
121920131375Smrg	DRMLISTDEL(&bo_gem->name_list);
122020131375Smrg
122122944501Smrg	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
122222944501Smrg	/* Put the buffer into our internal cache for reuse if we can. */
122322944501Smrg	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
122422944501Smrg	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
122522944501Smrg					      I915_MADV_DONTNEED)) {
122622944501Smrg		bo_gem->free_time = time;
122722944501Smrg
122822944501Smrg		bo_gem->name = NULL;
122922944501Smrg		bo_gem->validate_index = -1;
123022944501Smrg
123122944501Smrg		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
123222944501Smrg	} else {
123322944501Smrg		drm_intel_gem_bo_free(bo);
123422944501Smrg	}
123522944501Smrg}
123622944501Smrg
123722944501Smrgstatic void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
123822944501Smrg						      time_t time)
123922944501Smrg{
124022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
124122944501Smrg
124222944501Smrg	assert(atomic_read(&bo_gem->refcount) > 0);
124322944501Smrg	if (atomic_dec_and_test(&bo_gem->refcount))
124422944501Smrg		drm_intel_gem_bo_unreference_final(bo, time);
124522944501Smrg}
124622944501Smrg
124722944501Smrgstatic void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
124822944501Smrg{
124922944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
125022944501Smrg
125122944501Smrg	assert(atomic_read(&bo_gem->refcount) > 0);
1252a884aba1Smrg
1253a884aba1Smrg	if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
125422944501Smrg		drm_intel_bufmgr_gem *bufmgr_gem =
125522944501Smrg		    (drm_intel_bufmgr_gem *) bo->bufmgr;
125622944501Smrg		struct timespec time;
125722944501Smrg
125822944501Smrg		clock_gettime(CLOCK_MONOTONIC, &time);
125922944501Smrg
126022944501Smrg		pthread_mutex_lock(&bufmgr_gem->lock);
1261a884aba1Smrg
1262a884aba1Smrg		if (atomic_dec_and_test(&bo_gem->refcount)) {
1263a884aba1Smrg			drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1264a884aba1Smrg			drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1265a884aba1Smrg		}
1266a884aba1Smrg
126722944501Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
126822944501Smrg	}
126922944501Smrg}
127022944501Smrg
127122944501Smrgstatic int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
127222944501Smrg{
127322944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
127422944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
127522944501Smrg	struct drm_i915_gem_set_domain set_domain;
127622944501Smrg	int ret;
127722944501Smrg
1278a884aba1Smrg	if (bo_gem->is_userptr) {
1279a884aba1Smrg		/* Return the same user ptr */
1280a884aba1Smrg		bo->virtual = bo_gem->user_virtual;
1281a884aba1Smrg		return 0;
1282a884aba1Smrg	}
1283a884aba1Smrg
128422944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
128522944501Smrg
128620131375Smrg	if (bo_gem->map_count++ == 0)
128720131375Smrg		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
128820131375Smrg
128922944501Smrg	if (!bo_gem->mem_virtual) {
129022944501Smrg		struct drm_i915_gem_mmap mmap_arg;
129122944501Smrg
129220131375Smrg		DBG("bo_map: %d (%s), map_count=%d\n",
129320131375Smrg		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
129422944501Smrg
129520131375Smrg		VG_CLEAR(mmap_arg);
129622944501Smrg		mmap_arg.handle = bo_gem->gem_handle;
129722944501Smrg		mmap_arg.offset = 0;
129822944501Smrg		mmap_arg.size = bo->size;
12996d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd,
13006d98c517Smrg			       DRM_IOCTL_I915_GEM_MMAP,
13016d98c517Smrg			       &mmap_arg);
130222944501Smrg		if (ret != 0) {
130322944501Smrg			ret = -errno;
13049ce4edccSmrg			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
13059ce4edccSmrg			    __FILE__, __LINE__, bo_gem->gem_handle,
13069ce4edccSmrg			    bo_gem->name, strerror(errno));
130720131375Smrg			if (--bo_gem->map_count == 0)
130820131375Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
130922944501Smrg			pthread_mutex_unlock(&bufmgr_gem->lock);
131022944501Smrg			return ret;
131122944501Smrg		}
131220131375Smrg		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
131322944501Smrg		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
131422944501Smrg	}
131522944501Smrg	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
131622944501Smrg	    bo_gem->mem_virtual);
131722944501Smrg	bo->virtual = bo_gem->mem_virtual;
131822944501Smrg
131920131375Smrg	VG_CLEAR(set_domain);
132022944501Smrg	set_domain.handle = bo_gem->gem_handle;
132122944501Smrg	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
132222944501Smrg	if (write_enable)
132322944501Smrg		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
132422944501Smrg	else
132522944501Smrg		set_domain.write_domain = 0;
13266d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
13276d98c517Smrg		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
13286d98c517Smrg		       &set_domain);
132922944501Smrg	if (ret != 0) {
13309ce4edccSmrg		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
13319ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle,
13329ce4edccSmrg		    strerror(errno));
133322944501Smrg	}
133422944501Smrg
133520131375Smrg	if (write_enable)
133620131375Smrg		bo_gem->mapped_cpu_write = true;
133720131375Smrg
133820131375Smrg	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
133920131375Smrg	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
134022944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
134122944501Smrg
134222944501Smrg	return 0;
134322944501Smrg}
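
/*
 * Caller-side sketch (assumed usage, not part of this file): CPU maps
 * are reference counted per buffer and should be paired with an unmap,
 * e.g.
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {	// writable CPU map
 *		memset(bo->virtual, 0, bo->size);
 *		drm_intel_bo_unmap(bo);
 *	}
 */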
134422944501Smrg
134520131375Smrgstatic int
134620131375Smrgmap_gtt(drm_intel_bo *bo)
134722944501Smrg{
134822944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
134922944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
135022944501Smrg	int ret;
135122944501Smrg
1352a884aba1Smrg	if (bo_gem->is_userptr)
1353a884aba1Smrg		return -EINVAL;
1354a884aba1Smrg
135520131375Smrg	if (bo_gem->map_count++ == 0)
135620131375Smrg		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
135722944501Smrg
135822944501Smrg	/* Get a mapping of the buffer if we haven't before. */
135922944501Smrg	if (bo_gem->gtt_virtual == NULL) {
136022944501Smrg		struct drm_i915_gem_mmap_gtt mmap_arg;
136122944501Smrg
136220131375Smrg		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
136320131375Smrg		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
136422944501Smrg
136520131375Smrg		VG_CLEAR(mmap_arg);
136622944501Smrg		mmap_arg.handle = bo_gem->gem_handle;
136722944501Smrg
136822944501Smrg		/* Get the fake offset back... */
13696d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd,
13706d98c517Smrg			       DRM_IOCTL_I915_GEM_MMAP_GTT,
13716d98c517Smrg			       &mmap_arg);
137222944501Smrg		if (ret != 0) {
137322944501Smrg			ret = -errno;
13749ce4edccSmrg			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
13759ce4edccSmrg			    __FILE__, __LINE__,
13769ce4edccSmrg			    bo_gem->gem_handle, bo_gem->name,
13779ce4edccSmrg			    strerror(errno));
137820131375Smrg			if (--bo_gem->map_count == 0)
137920131375Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
138022944501Smrg			return ret;
138122944501Smrg		}
138222944501Smrg
138322944501Smrg		/* and mmap it */
1384aec75c42Sriastradh		ret = drmMap(bufmgr_gem->fd, mmap_arg.offset, bo->size,
1385aec75c42Sriastradh		    &bo_gem->gtt_virtual);
1386aec75c42Sriastradh		if (ret) {
138722944501Smrg			bo_gem->gtt_virtual = NULL;
13889ce4edccSmrg			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
13899ce4edccSmrg			    __FILE__, __LINE__,
13909ce4edccSmrg			    bo_gem->gem_handle, bo_gem->name,
13919ce4edccSmrg			    strerror(errno));
139220131375Smrg			if (--bo_gem->map_count == 0)
139320131375Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
139422944501Smrg			return ret;
139522944501Smrg		}
139622944501Smrg	}
139722944501Smrg
139822944501Smrg	bo->virtual = bo_gem->gtt_virtual;
139922944501Smrg
140022944501Smrg	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
140122944501Smrg	    bo_gem->gtt_virtual);
140222944501Smrg
140320131375Smrg	return 0;
140420131375Smrg}
140520131375Smrg
1406a884aba1Smrgdrm_public int
1407a884aba1Smrgdrm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
140820131375Smrg{
140920131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
141020131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
141120131375Smrg	struct drm_i915_gem_set_domain set_domain;
141220131375Smrg	int ret;
141320131375Smrg
141420131375Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
141520131375Smrg
141620131375Smrg	ret = map_gtt(bo);
141720131375Smrg	if (ret) {
141820131375Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
141920131375Smrg		return ret;
142020131375Smrg	}
142120131375Smrg
142220131375Smrg	/* Now move it to the GTT domain so that the GPU and CPU
142320131375Smrg	 * caches are flushed and the GPU isn't actively using the
142420131375Smrg	 * buffer.
142520131375Smrg	 *
142620131375Smrg	 * The pagefault handler does this domain change for us when
142720131375Smrg	 * it has unbound the BO from the GTT, but it's up to us to
142820131375Smrg	 * tell it when we're about to use things if we had done
142920131375Smrg	 * rendering and it still happens to be bound to the GTT.
143020131375Smrg	 */
143120131375Smrg	VG_CLEAR(set_domain);
143222944501Smrg	set_domain.handle = bo_gem->gem_handle;
143322944501Smrg	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
143422944501Smrg	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
14356d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
14366d98c517Smrg		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
14376d98c517Smrg		       &set_domain);
143822944501Smrg	if (ret != 0) {
14399ce4edccSmrg		DBG("%s:%d: Error setting domain %d: %s\n",
14409ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle,
14419ce4edccSmrg		    strerror(errno));
144222944501Smrg	}
144322944501Smrg
144420131375Smrg	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
144520131375Smrg	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
144622944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
144722944501Smrg
14486d98c517Smrg	return 0;
144922944501Smrg}
145022944501Smrg
145120131375Smrg/**
145220131375Smrg * Performs a mapping of the buffer object like the normal GTT
145320131375Smrg * mapping, but avoids waiting for the GPU to be done reading from or
145420131375Smrg * rendering to the buffer.
145520131375Smrg *
145620131375Smrg * This is used in the implementation of GL_ARB_map_buffer_range: The
145720131375Smrg * user asks to create a buffer, then does a mapping, fills some
145820131375Smrg * space, runs a drawing command, then asks to map it again without
145920131375Smrg * synchronizing because it guarantees that it won't write over the
146020131375Smrg * data that the GPU is busy using (or, more specifically, that if it
146120131375Smrg * does write over the data, it acknowledges that rendering is
146220131375Smrg * undefined).
146320131375Smrg */
146420131375Smrg
1465a884aba1Smrgdrm_public int
1466a884aba1Smrgdrm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
146722944501Smrg{
146822944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
146920131375Smrg#ifdef HAVE_VALGRIND
147020131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
147120131375Smrg#endif
147220131375Smrg	int ret;
147322944501Smrg
147420131375Smrg	/* If the CPU cache isn't coherent with the GTT, then use a
147520131375Smrg	 * regular synchronized mapping.  The problem is that we don't
147620131375Smrg	 * track where the buffer was last used on the CPU side in
147720131375Smrg	 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
147820131375Smrg	 * we would potentially corrupt the buffer even when the user
147920131375Smrg	 * does reasonable things.
148020131375Smrg	 */
148120131375Smrg	if (!bufmgr_gem->has_llc)
148220131375Smrg		return drm_intel_gem_bo_map_gtt(bo);
148322944501Smrg
148422944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
148520131375Smrg
148620131375Smrg	ret = map_gtt(bo);
148720131375Smrg	if (ret == 0) {
148820131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
148920131375Smrg		VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
149020131375Smrg	}
149120131375Smrg
149222944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
149322944501Smrg
149422944501Smrg	return ret;
149522944501Smrg}
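
/*
 * Illustrative usage sketch for the unsynchronized-map pattern described
 * above (an editor-added example, not part of the library; "data", "len",
 * "more" and "more_len" are placeholders, and error checking is omitted):
 *
 *	drm_intel_gem_bo_map_gtt(bo);                 // synchronized map
 *	memcpy(bo->virtual, data, len);               // fill some space
 *	drm_intel_gem_bo_unmap_gtt(bo);
 *	... queue a draw call that reads from bo ...
 *	drm_intel_gem_bo_map_unsynchronized(bo);      // does not wait on the GPU
 *	memcpy((char *)bo->virtual + len, more, more_len);
 *	drm_intel_gem_bo_unmap_gtt(bo);
 *
 * On non-LLC hardware this falls back to the synchronized GTT map above.
 */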
149622944501Smrg
149722944501Smrgstatic int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
149822944501Smrg{
1499a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
150022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
150120131375Smrg	int ret = 0;
150222944501Smrg
150322944501Smrg	if (bo == NULL)
150422944501Smrg		return 0;
150522944501Smrg
1506a884aba1Smrg	if (bo_gem->is_userptr)
1507a884aba1Smrg		return 0;
1508a884aba1Smrg
1509a884aba1Smrg	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1510a884aba1Smrg
151122944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
151222944501Smrg
151320131375Smrg	if (bo_gem->map_count <= 0) {
151420131375Smrg		DBG("attempted to unmap an unmapped bo\n");
151520131375Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
151620131375Smrg		/* Preserve the old behaviour of just treating this as a
151720131375Smrg		 * no-op rather than reporting the error.
151820131375Smrg		 */
151920131375Smrg		return 0;
152020131375Smrg	}
152120131375Smrg
152220131375Smrg	if (bo_gem->mapped_cpu_write) {
152320131375Smrg		struct drm_i915_gem_sw_finish sw_finish;
152420131375Smrg
152520131375Smrg		/* Cause a flush to happen if the buffer's pinned for
152620131375Smrg		 * scanout, so the results show up in a timely manner.
152720131375Smrg		 * Unlike GTT set domains, this only does work if the
152820131375Smrg		 * buffer is scanout-related.
152920131375Smrg		 */
153020131375Smrg		VG_CLEAR(sw_finish);
153120131375Smrg		sw_finish.handle = bo_gem->gem_handle;
153220131375Smrg		ret = drmIoctl(bufmgr_gem->fd,
153320131375Smrg			       DRM_IOCTL_I915_GEM_SW_FINISH,
153420131375Smrg			       &sw_finish);
153520131375Smrg		ret = ret == -1 ? -errno : 0;
153620131375Smrg
153720131375Smrg		bo_gem->mapped_cpu_write = false;
153820131375Smrg	}
153922944501Smrg
154020131375Smrg	/* We need to unmap after every invocation as we cannot track
154120131375Smrg	 * an open vma for every bo as that will exhaust the system
154220131375Smrg	 * limits and cause later failures.
154320131375Smrg	 */
154420131375Smrg	if (--bo_gem->map_count == 0) {
154520131375Smrg		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
154620131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
154720131375Smrg		bo->virtual = NULL;
154820131375Smrg	}
154922944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
155022944501Smrg
155122944501Smrg	return ret;
155222944501Smrg}
155322944501Smrg
1554a884aba1Smrgdrm_public int
1555a884aba1Smrgdrm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
155620131375Smrg{
155720131375Smrg	return drm_intel_gem_bo_unmap(bo);
155820131375Smrg}
155920131375Smrg
156022944501Smrgstatic int
156122944501Smrgdrm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
156222944501Smrg			 unsigned long size, const void *data)
156322944501Smrg{
156422944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
156522944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
156622944501Smrg	struct drm_i915_gem_pwrite pwrite;
156722944501Smrg	int ret;
156822944501Smrg
1569a884aba1Smrg	if (bo_gem->is_userptr)
1570a884aba1Smrg		return -EINVAL;
1571a884aba1Smrg
157220131375Smrg	VG_CLEAR(pwrite);
157322944501Smrg	pwrite.handle = bo_gem->gem_handle;
157422944501Smrg	pwrite.offset = offset;
157522944501Smrg	pwrite.size = size;
157622944501Smrg	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
15776d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
15786d98c517Smrg		       DRM_IOCTL_I915_GEM_PWRITE,
15796d98c517Smrg		       &pwrite);
158022944501Smrg	if (ret != 0) {
158122944501Smrg		ret = -errno;
15829ce4edccSmrg		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
15839ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
15849ce4edccSmrg		    (int)size, strerror(errno));
158522944501Smrg	}
158622944501Smrg
158722944501Smrg	return ret;
158822944501Smrg}
158922944501Smrg
159022944501Smrgstatic int
159122944501Smrgdrm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
159222944501Smrg{
159322944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
159422944501Smrg	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
159522944501Smrg	int ret;
159622944501Smrg
159720131375Smrg	VG_CLEAR(get_pipe_from_crtc_id);
159822944501Smrg	get_pipe_from_crtc_id.crtc_id = crtc_id;
15996d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
16006d98c517Smrg		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
16016d98c517Smrg		       &get_pipe_from_crtc_id);
160222944501Smrg	if (ret != 0) {
160322944501Smrg		/* We return -1 here to signal that we don't
160422944501Smrg		 * know which pipe is associated with this crtc.
160522944501Smrg		 * This lets the caller know that this information
160622944501Smrg		 * isn't available; using the wrong pipe for
160722944501Smrg		 * vblank waiting can cause the chipset to lock up
160822944501Smrg		 * vblank waiting can cause the chipset to lock up.
160922944501Smrg		return -1;
161022944501Smrg	}
161122944501Smrg
161222944501Smrg	return get_pipe_from_crtc_id.pipe;
161322944501Smrg}
161422944501Smrg
161522944501Smrgstatic int
161622944501Smrgdrm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
161722944501Smrg			     unsigned long size, void *data)
161822944501Smrg{
161922944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
162022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
162122944501Smrg	struct drm_i915_gem_pread pread;
162222944501Smrg	int ret;
162322944501Smrg
1624a884aba1Smrg	if (bo_gem->is_userptr)
1625a884aba1Smrg		return -EINVAL;
1626a884aba1Smrg
162720131375Smrg	VG_CLEAR(pread);
162822944501Smrg	pread.handle = bo_gem->gem_handle;
162922944501Smrg	pread.offset = offset;
163022944501Smrg	pread.size = size;
163122944501Smrg	pread.data_ptr = (uint64_t) (uintptr_t) data;
16326d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
16336d98c517Smrg		       DRM_IOCTL_I915_GEM_PREAD,
16346d98c517Smrg		       &pread);
163522944501Smrg	if (ret != 0) {
163622944501Smrg		ret = -errno;
16379ce4edccSmrg		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
16389ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
16399ce4edccSmrg		    (int)size, strerror(errno));
164022944501Smrg	}
164122944501Smrg
164222944501Smrg	return ret;
164322944501Smrg}
164422944501Smrg
16459ce4edccSmrg/** Waits for all GPU rendering with the object to have completed. */
164622944501Smrgstatic void
164722944501Smrgdrm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
164822944501Smrg{
16499ce4edccSmrg	drm_intel_gem_bo_start_gtt_access(bo, 1);
165022944501Smrg}
165122944501Smrg
165220131375Smrg/**
165320131375Smrg * Waits on a BO for the given amount of time.
165420131375Smrg *
165520131375Smrg * @bo: buffer object to wait for
165620131375Smrg * @timeout_ns: amount of time to wait in nanoseconds.
165720131375Smrg *   If value is less than 0, an infinite wait will occur.
165820131375Smrg *
165920131375Smrg * Returns 0 if the wait was successful, i.e. the last batch referencing the
166020131375Smrg * object has completed within the allotted time. Otherwise some negative return
166120131375Smrg * value describes the error. Of particular interest is -ETIME when the wait has
166220131375Smrg * failed to yield the desired result.
166320131375Smrg *
166420131375Smrg * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
166520131375Smrg * the operation to give up after a certain amount of time. Another subtle
166620131375Smrg * difference is that the internal locking semantics differ (this variant does
166720131375Smrg * not hold the lock for the duration of the wait). This makes the wait subject
166820131375Smrg * to a larger userspace race window.
166920131375Smrg *
167020131375Smrg * The implementation shall wait until the object is no longer actively
167120131375Smrg * referenced within a batch buffer at the time of the call. The wait will
167220131375Smrg * not guarantee that the buffer is re-issued via another thread, or an flinked
167320131375Smrg * not guard against the buffer being re-issued via another thread or a flinked
167420131375Smrg * is important.
167520131375Smrg */
1676a884aba1Smrgdrm_public int
1677a884aba1Smrgdrm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
167820131375Smrg{
167920131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
168020131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
168120131375Smrg	struct drm_i915_gem_wait wait;
168220131375Smrg	int ret;
168320131375Smrg
168420131375Smrg	if (!bufmgr_gem->has_wait_timeout) {
168520131375Smrg		DBG("%s:%d: Timed wait is not supported. Falling back to "
168620131375Smrg		    "infinite wait\n", __FILE__, __LINE__);
168720131375Smrg		if (timeout_ns) {
168820131375Smrg			drm_intel_gem_bo_wait_rendering(bo);
168920131375Smrg			return 0;
169020131375Smrg		} else {
169120131375Smrg			return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
169220131375Smrg		}
169320131375Smrg	}
169420131375Smrg
169520131375Smrg	wait.bo_handle = bo_gem->gem_handle;
169620131375Smrg	wait.timeout_ns = timeout_ns;
169720131375Smrg	wait.flags = 0;
169820131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
169920131375Smrg	if (ret == -1)
170020131375Smrg		return -errno;
170120131375Smrg
170220131375Smrg	return ret;
170320131375Smrg}
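
/*
 * Illustrative usage sketch for drm_intel_gem_bo_wait() (editor-added
 * example, not part of the library; the 2 ms budget is arbitrary):
 *
 *	int err = drm_intel_gem_bo_wait(bo, 2000000);   // wait up to 2 ms
 *	if (err == -ETIME) {
 *		// still busy: do other work and retry later
 *	} else if (err == 0) {
 *		// the last batch referencing bo has completed
 *	}
 *
 * A negative timeout_ns waits indefinitely, like
 * drm_intel_gem_bo_wait_rendering().
 */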
170420131375Smrg
170522944501Smrg/**
170622944501Smrg * Sets the object to the GTT read and possibly write domain, used by the X
170722944501Smrg * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
170822944501Smrg *
170922944501Smrg * In combination with drm_intel_gem_bo_pin() and manual fence management, we
171022944501Smrg * can do tiled pixmaps this way.
171122944501Smrg */
1712a884aba1Smrgdrm_public void
171322944501Smrgdrm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
171422944501Smrg{
171522944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
171622944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
171722944501Smrg	struct drm_i915_gem_set_domain set_domain;
171822944501Smrg	int ret;
171922944501Smrg
172020131375Smrg	VG_CLEAR(set_domain);
172122944501Smrg	set_domain.handle = bo_gem->gem_handle;
172222944501Smrg	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
172322944501Smrg	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
17246d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
17256d98c517Smrg		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
17266d98c517Smrg		       &set_domain);
172722944501Smrg	if (ret != 0) {
17289ce4edccSmrg		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
17299ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle,
17309ce4edccSmrg		    set_domain.read_domains, set_domain.write_domain,
17319ce4edccSmrg		    strerror(errno));
173222944501Smrg	}
173322944501Smrg}
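
/*
 * Illustrative sketch of the pinned-access pattern mentioned above
 * (editor-added example, not part of the library; how the pinned offset is
 * actually mapped through the aperture is driver-specific and only assumed
 * here, and error checking is omitted):
 *
 *	drm_intel_bo_pin(bo, 4096);                 // GTT offset in bo->offset64
 *	drm_intel_gem_bo_start_gtt_access(bo, 1);   // flush caches, wait for GPU
 *	// ... CPU access through the driver's own aperture mapping ...
 *	drm_intel_bo_unpin(bo);
 */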
173422944501Smrg
173522944501Smrgstatic void
173622944501Smrgdrm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
173722944501Smrg{
173822944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
173922944501Smrg	int i;
174022944501Smrg
174122944501Smrg	free(bufmgr_gem->exec2_objects);
174222944501Smrg	free(bufmgr_gem->exec_objects);
174322944501Smrg	free(bufmgr_gem->exec_bos);
174420131375Smrg	free(bufmgr_gem->aub_filename);
174522944501Smrg
174622944501Smrg	pthread_mutex_destroy(&bufmgr_gem->lock);
174722944501Smrg
174822944501Smrg	/* Free any cached buffer objects we were going to reuse */
1749aaba2545Smrg	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
175022944501Smrg		struct drm_intel_gem_bo_bucket *bucket =
175122944501Smrg		    &bufmgr_gem->cache_bucket[i];
175222944501Smrg		drm_intel_bo_gem *bo_gem;
175322944501Smrg
175422944501Smrg		while (!DRMLISTEMPTY(&bucket->head)) {
175522944501Smrg			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
175622944501Smrg					      bucket->head.next, head);
175722944501Smrg			DRMLISTDEL(&bo_gem->head);
175822944501Smrg
175922944501Smrg			drm_intel_gem_bo_free(&bo_gem->bo);
176022944501Smrg		}
176122944501Smrg	}
176222944501Smrg
176322944501Smrg	free(bufmgr);
176422944501Smrg}
176522944501Smrg
176622944501Smrg/**
176722944501Smrg * Adds the target buffer to the validation list and adds the relocation
176822944501Smrg * to the reloc_buffer's relocation list.
176922944501Smrg *
177022944501Smrg * The relocation entry at the given offset must already contain the
177122944501Smrg * precomputed relocation value, because the kernel will optimize out
177222944501Smrg * the relocation entry write when the buffer hasn't moved from the
177322944501Smrg * last known offset in target_bo.
177422944501Smrg */
177522944501Smrgstatic int
177622944501Smrgdo_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
177722944501Smrg		 drm_intel_bo *target_bo, uint32_t target_offset,
177822944501Smrg		 uint32_t read_domains, uint32_t write_domain,
177920131375Smrg		 bool need_fence)
178022944501Smrg{
178122944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
178222944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
178322944501Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
178420131375Smrg	bool fenced_command;
178522944501Smrg
178622944501Smrg	if (bo_gem->has_error)
178722944501Smrg		return -ENOMEM;
178822944501Smrg
178922944501Smrg	if (target_bo_gem->has_error) {
179020131375Smrg		bo_gem->has_error = true;
179122944501Smrg		return -ENOMEM;
179222944501Smrg	}
179322944501Smrg
179422944501Smrg	/* We never use HW fences for rendering on 965+ */
179522944501Smrg	if (bufmgr_gem->gen >= 4)
179620131375Smrg		need_fence = false;
179722944501Smrg
17989ce4edccSmrg	fenced_command = need_fence;
17999ce4edccSmrg	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
180020131375Smrg		need_fence = false;
18019ce4edccSmrg
180222944501Smrg	/* Create a new relocation list if needed */
180322944501Smrg	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
180422944501Smrg		return -ENOMEM;
180522944501Smrg
180622944501Smrg	/* Check overflow */
180722944501Smrg	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
180822944501Smrg
180922944501Smrg	/* Check args */
181022944501Smrg	assert(offset <= bo->size - 4);
181122944501Smrg	assert((write_domain & (write_domain - 1)) == 0);
181222944501Smrg
181322944501Smrg	/* Make sure that we're not adding a reloc to something whose size has
181422944501Smrg	 * already been accounted for.
181522944501Smrg	 */
181622944501Smrg	assert(!bo_gem->used_as_reloc_target);
1817aaba2545Smrg	if (target_bo_gem != bo_gem) {
181820131375Smrg		target_bo_gem->used_as_reloc_target = true;
1819aaba2545Smrg		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1820aaba2545Smrg	}
182122944501Smrg	/* An object needing a fence is a tiled buffer, so it won't have
182222944501Smrg	 * relocs to other buffers.
182322944501Smrg	 */
182422944501Smrg	if (need_fence)
182522944501Smrg		target_bo_gem->reloc_tree_fences = 1;
182622944501Smrg	bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
182722944501Smrg
182822944501Smrg	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
182922944501Smrg	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
183022944501Smrg	bo_gem->relocs[bo_gem->reloc_count].target_handle =
183122944501Smrg	    target_bo_gem->gem_handle;
183222944501Smrg	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
183322944501Smrg	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
183420131375Smrg	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
183522944501Smrg
183622944501Smrg	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1837aaba2545Smrg	if (target_bo != bo)
1838aaba2545Smrg		drm_intel_gem_bo_reference(target_bo);
18399ce4edccSmrg	if (fenced_command)
184022944501Smrg		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
184122944501Smrg			DRM_INTEL_RELOC_FENCE;
184222944501Smrg	else
184322944501Smrg		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
184422944501Smrg
184522944501Smrg	bo_gem->reloc_count++;
184622944501Smrg
184722944501Smrg	return 0;
184822944501Smrg}
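
/*
 * Illustrative caller-side sketch of the precondition documented above
 * do_bo_emit_reloc() (editor-added example, not part of the library;
 * "batch", "batch_used", "target" and "delta" are placeholders):
 *
 *	uint32_t *cmd = (uint32_t *)((char *)batch->virtual + batch_used);
 *	*cmd = target->offset64 + delta;        // precomputed presumed value
 *	drm_intel_bo_emit_reloc(batch, batch_used, target, delta,
 *				I915_GEM_DOMAIN_RENDER, 0);
 *
 * If target has not moved since offset64 was recorded, the kernel can skip
 * rewriting this dword at execbuffer time.
 */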
184922944501Smrg
185022944501Smrgstatic int
185122944501Smrgdrm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
185222944501Smrg			    drm_intel_bo *target_bo, uint32_t target_offset,
185322944501Smrg			    uint32_t read_domains, uint32_t write_domain)
185422944501Smrg{
185522944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
185622944501Smrg
185722944501Smrg	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
185822944501Smrg				read_domains, write_domain,
185922944501Smrg				!bufmgr_gem->fenced_relocs);
186022944501Smrg}
186122944501Smrg
186222944501Smrgstatic int
186322944501Smrgdrm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
186422944501Smrg				  drm_intel_bo *target_bo,
186522944501Smrg				  uint32_t target_offset,
186622944501Smrg				  uint32_t read_domains, uint32_t write_domain)
186722944501Smrg{
186822944501Smrg	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
186920131375Smrg				read_domains, write_domain, true);
187020131375Smrg}
187120131375Smrg
1872a884aba1Smrgdrm_public int
187320131375Smrgdrm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
187420131375Smrg{
187520131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
187620131375Smrg
187720131375Smrg	return bo_gem->reloc_count;
187820131375Smrg}
187920131375Smrg
188020131375Smrg/**
188120131375Smrg * Removes existing relocation entries in the BO after "start".
188220131375Smrg *
188320131375Smrg * This allows a user to avoid a two-step process for state setup, where
188420131375Smrg * all the buffer objects would first be counted up and passed to
188520131375Smrg * drm_intel_bufmgr_check_aperture_space() before emitting any of the
188620131375Smrg * relocations for the state setup.  Instead, save the state of the
188720131375Smrg * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
188820131375Smrg * state, and then check if it still fits in the aperture.
188920131375Smrg *
189020131375Smrg * Any further drm_intel_bufmgr_check_aperture_space() queries
189120131375Smrg * involving this buffer in the tree are undefined after this call.
189220131375Smrg */
1893a884aba1Smrgdrm_public void
189420131375Smrgdrm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
189520131375Smrg{
1896a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
189720131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
189820131375Smrg	int i;
189920131375Smrg	struct timespec time;
190020131375Smrg
190120131375Smrg	clock_gettime(CLOCK_MONOTONIC, &time);
190220131375Smrg
190320131375Smrg	assert(bo_gem->reloc_count >= start);
1904a884aba1Smrg
190520131375Smrg	/* Unreference the cleared target buffers */
1906a884aba1Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
1907a884aba1Smrg
190820131375Smrg	for (i = start; i < bo_gem->reloc_count; i++) {
190920131375Smrg		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
191020131375Smrg		if (&target_bo_gem->bo != bo) {
191120131375Smrg			bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
191220131375Smrg			drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
191320131375Smrg								  time.tv_sec);
191420131375Smrg		}
191520131375Smrg	}
191620131375Smrg	bo_gem->reloc_count = start;
1917a884aba1Smrg
1918a884aba1Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
1919a884aba1Smrg
192022944501Smrg}
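
/*
 * Illustrative sketch of the save/emit/check pattern described above
 * (editor-added example, not part of the library; "batch" and emit_state()
 * are placeholders):
 *
 *	int start = drm_intel_gem_bo_get_reloc_count(batch);
 *	emit_state(batch);                      // emits relocations into batch
 *	if (drm_intel_bufmgr_check_aperture_space(&batch, 1) != 0) {
 *		drm_intel_gem_bo_clear_relocs(batch, start);   // roll back
 *		// flush, then re-emit the state into a fresh batch
 *	}
 */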
192122944501Smrg
192222944501Smrg/**
192322944501Smrg * Walk the tree of relocations rooted at BO and accumulate the list of
192422944501Smrg * validations to be performed and update the relocation buffers with
192522944501Smrg * index values into the validation list.
192622944501Smrg */
192722944501Smrgstatic void
192822944501Smrgdrm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
192922944501Smrg{
193022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
193122944501Smrg	int i;
193222944501Smrg
193322944501Smrg	if (bo_gem->relocs == NULL)
193422944501Smrg		return;
193522944501Smrg
193622944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++) {
193722944501Smrg		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
193822944501Smrg
1939aaba2545Smrg		if (target_bo == bo)
1940aaba2545Smrg			continue;
1941aaba2545Smrg
194220131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
194320131375Smrg
194422944501Smrg		/* Continue walking the tree depth-first. */
194522944501Smrg		drm_intel_gem_bo_process_reloc(target_bo);
194622944501Smrg
194722944501Smrg		/* Add the target to the validate list */
194822944501Smrg		drm_intel_add_validate_buffer(target_bo);
194922944501Smrg	}
195022944501Smrg}
195122944501Smrg
195222944501Smrgstatic void
195322944501Smrgdrm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
195422944501Smrg{
195522944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
195622944501Smrg	int i;
195722944501Smrg
195822944501Smrg	if (bo_gem->relocs == NULL)
195922944501Smrg		return;
196022944501Smrg
196122944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++) {
196222944501Smrg		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
196322944501Smrg		int need_fence;
196422944501Smrg
1965aaba2545Smrg		if (target_bo == bo)
1966aaba2545Smrg			continue;
1967aaba2545Smrg
196820131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
196920131375Smrg
197022944501Smrg		/* Continue walking the tree depth-first. */
197122944501Smrg		drm_intel_gem_bo_process_reloc2(target_bo);
197222944501Smrg
197322944501Smrg		need_fence = (bo_gem->reloc_target_info[i].flags &
197422944501Smrg			      DRM_INTEL_RELOC_FENCE);
197522944501Smrg
197622944501Smrg		/* Add the target to the validate list */
197722944501Smrg		drm_intel_add_validate_buffer2(target_bo, need_fence);
197822944501Smrg	}
197922944501Smrg}
198022944501Smrg
198122944501Smrg
198222944501Smrgstatic void
198322944501Smrgdrm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
198422944501Smrg{
198522944501Smrg	int i;
198622944501Smrg
198722944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
198822944501Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
198922944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
199022944501Smrg
199122944501Smrg		/* Update the buffer offset */
199220131375Smrg		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
1993d82d45b3Sjoerg			DBG("BO %d (%s) migrated: 0x%08llx -> 0x%08llx\n",
1994d82d45b3Sjoerg			    bo_gem->gem_handle, bo_gem->name,
1995d82d45b3Sjoerg			    (unsigned long long)bo->offset64,
199622944501Smrg			    (unsigned long long)bufmgr_gem->exec_objects[i].
199722944501Smrg			    offset);
199820131375Smrg			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
199922944501Smrg			bo->offset = bufmgr_gem->exec_objects[i].offset;
200022944501Smrg		}
200122944501Smrg	}
200222944501Smrg}
200322944501Smrg
200422944501Smrgstatic void
200522944501Smrgdrm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
200622944501Smrg{
200722944501Smrg	int i;
200822944501Smrg
200922944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
201022944501Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
201122944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
201222944501Smrg
201322944501Smrg		/* Update the buffer offset */
201420131375Smrg		if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2015d82d45b3Sjoerg			DBG("BO %d (%s) migrated: 0x%08llx -> 0x%08llx\n",
2016d82d45b3Sjoerg			    bo_gem->gem_handle, bo_gem->name,
2017d82d45b3Sjoerg			    (unsigned long long)bo->offset64,
201822944501Smrg			    (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
201920131375Smrg			bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
202022944501Smrg			bo->offset = bufmgr_gem->exec2_objects[i].offset;
202122944501Smrg		}
202222944501Smrg	}
202322944501Smrg}
202422944501Smrg
202520131375Smrgstatic void
202620131375Smrgaub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
202720131375Smrg{
202820131375Smrg	fwrite(&data, 1, 4, bufmgr_gem->aub_file);
202920131375Smrg}
203020131375Smrg
203120131375Smrgstatic void
203220131375Smrgaub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
203320131375Smrg{
203420131375Smrg	fwrite(data, 1, size, bufmgr_gem->aub_file);
203520131375Smrg}
203620131375Smrg
203720131375Smrgstatic void
203820131375Smrgaub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
203922944501Smrg{
204022944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
204122944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
204220131375Smrg	uint32_t *data;
204320131375Smrg	unsigned int i;
204422944501Smrg
204520131375Smrg	data = malloc(bo->size);
204620131375Smrg	drm_intel_bo_get_subdata(bo, offset, size, data);
204722944501Smrg
204820131375Smrg	/* Easy mode: write out bo with no relocations */
204920131375Smrg	if (!bo_gem->reloc_count) {
205020131375Smrg		aub_out_data(bufmgr_gem, data, size);
205120131375Smrg		free(data);
205220131375Smrg		return;
205320131375Smrg	}
205422944501Smrg
205520131375Smrg	/* Otherwise, handle the relocations while writing. */
205620131375Smrg	for (i = 0; i < size / 4; i++) {
205720131375Smrg		int r;
205820131375Smrg		for (r = 0; r < bo_gem->reloc_count; r++) {
205920131375Smrg			struct drm_i915_gem_relocation_entry *reloc;
206020131375Smrg			drm_intel_reloc_target *info;
206122944501Smrg
206220131375Smrg			reloc = &bo_gem->relocs[r];
206320131375Smrg			info = &bo_gem->reloc_target_info[r];
206422944501Smrg
206520131375Smrg			if (reloc->offset == offset + i * 4) {
206620131375Smrg				drm_intel_bo_gem *target_gem;
206720131375Smrg				uint32_t val;
206822944501Smrg
206920131375Smrg				target_gem = (drm_intel_bo_gem *)info->bo;
207022944501Smrg
207120131375Smrg				val = reloc->delta;
207220131375Smrg				val += target_gem->aub_offset;
207322944501Smrg
207420131375Smrg				aub_out(bufmgr_gem, val);
207520131375Smrg				data[i] = val;
207620131375Smrg				break;
207720131375Smrg			}
207820131375Smrg		}
207920131375Smrg		if (r == bo_gem->reloc_count) {
208020131375Smrg			/* no relocation, just the data */
208120131375Smrg			aub_out(bufmgr_gem, data[i]);
208220131375Smrg		}
208322944501Smrg	}
208422944501Smrg
208520131375Smrg	free(data);
208622944501Smrg}
208722944501Smrg
208820131375Smrgstatic void
208920131375Smrgaub_bo_get_address(drm_intel_bo *bo)
209022944501Smrg{
209120131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
209220131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
209322944501Smrg
209420131375Smrg	/* Give the object a graphics address in the AUB file.  We
209520131375Smrg	 * don't just use the GEM object address because we do AUB
209620131375Smrg	 * dumping before execution -- we want to successfully log
209720131375Smrg	 * when the hardware might hang, and we might even want to aub
209820131375Smrg	 * capture for a driver trying to execute on a different
209920131375Smrg	 * generation of hardware by disabling the actual kernel exec
210020131375Smrg	 * call.
210120131375Smrg	 */
210220131375Smrg	bo_gem->aub_offset = bufmgr_gem->aub_offset;
210320131375Smrg	bufmgr_gem->aub_offset += bo->size;
210420131375Smrg	/* XXX: Handle aperture overflow. */
210520131375Smrg	assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
210620131375Smrg}
210720131375Smrg
210820131375Smrgstatic void
210920131375Smrgaub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
211020131375Smrg		      uint32_t offset, uint32_t size)
211120131375Smrg{
211220131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
211320131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
211420131375Smrg
211520131375Smrg	aub_out(bufmgr_gem,
211620131375Smrg		CMD_AUB_TRACE_HEADER_BLOCK |
211720131375Smrg		((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
211820131375Smrg	aub_out(bufmgr_gem,
211920131375Smrg		AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
212020131375Smrg	aub_out(bufmgr_gem, subtype);
212120131375Smrg	aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
212220131375Smrg	aub_out(bufmgr_gem, size);
212320131375Smrg	if (bufmgr_gem->gen >= 8)
212420131375Smrg		aub_out(bufmgr_gem, 0);
212520131375Smrg	aub_write_bo_data(bo, offset, size);
212620131375Smrg}
212720131375Smrg
212820131375Smrg/**
212920131375Smrg * Break up large objects into multiple writes.  Otherwise a 128kb VBO
213020131375Smrg * would overflow the 16 bits of size field in the packet header and
213120131375Smrg * everything goes badly after that.
213220131375Smrg */
213320131375Smrgstatic void
213420131375Smrgaub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
213520131375Smrg			    uint32_t offset, uint32_t size)
213620131375Smrg{
213720131375Smrg	uint32_t block_size;
213820131375Smrg	uint32_t sub_offset;
213920131375Smrg
214020131375Smrg	for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
214120131375Smrg		block_size = size - sub_offset;
214220131375Smrg
214320131375Smrg		if (block_size > 8 * 4096)
214420131375Smrg			block_size = 8 * 4096;
214520131375Smrg
214620131375Smrg		aub_write_trace_block(bo, type, subtype, offset + sub_offset,
214720131375Smrg				      block_size);
214820131375Smrg	}
214920131375Smrg}
215020131375Smrg
215120131375Smrgstatic void
215220131375Smrgaub_write_bo(drm_intel_bo *bo)
215320131375Smrg{
215420131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
215520131375Smrg	uint32_t offset = 0;
215620131375Smrg	unsigned i;
215720131375Smrg
215820131375Smrg	aub_bo_get_address(bo);
215920131375Smrg
216020131375Smrg	/* Write out each annotated section separately. */
216120131375Smrg	for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
216220131375Smrg		drm_intel_aub_annotation *annotation =
216320131375Smrg			&bo_gem->aub_annotations[i];
216420131375Smrg		uint32_t ending_offset = annotation->ending_offset;
216520131375Smrg		if (ending_offset > bo->size)
216620131375Smrg			ending_offset = bo->size;
216720131375Smrg		if (ending_offset > offset) {
216820131375Smrg			aub_write_large_trace_block(bo, annotation->type,
216920131375Smrg						    annotation->subtype,
217020131375Smrg						    offset,
217120131375Smrg						    ending_offset - offset);
217220131375Smrg			offset = ending_offset;
217320131375Smrg		}
217420131375Smrg	}
217520131375Smrg
217620131375Smrg	/* Write out any remaining unannotated data */
217720131375Smrg	if (offset < bo->size) {
217820131375Smrg		aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
217920131375Smrg					    offset, bo->size - offset);
218020131375Smrg	}
218120131375Smrg}
218220131375Smrg
218320131375Smrg/*
218420131375Smrg * Make a ringbuffer on the fly and dump it
218520131375Smrg */
218620131375Smrgstatic void
218720131375Smrgaub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
218820131375Smrg			  uint32_t batch_buffer, int ring_flag)
218920131375Smrg{
219020131375Smrg	uint32_t ringbuffer[4096];
219120131375Smrg	int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
219220131375Smrg	int ring_count = 0;
219320131375Smrg
219420131375Smrg	if (ring_flag == I915_EXEC_BSD)
219520131375Smrg		ring = AUB_TRACE_TYPE_RING_PRB1;
219620131375Smrg	else if (ring_flag == I915_EXEC_BLT)
219720131375Smrg		ring = AUB_TRACE_TYPE_RING_PRB2;
219820131375Smrg
219920131375Smrg	/* Make a ring buffer to execute our batchbuffer. */
220020131375Smrg	memset(ringbuffer, 0, sizeof(ringbuffer));
220120131375Smrg	if (bufmgr_gem->gen >= 8) {
220220131375Smrg		ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2);
220320131375Smrg		ringbuffer[ring_count++] = batch_buffer;
220420131375Smrg		ringbuffer[ring_count++] = 0;
220520131375Smrg	} else {
220620131375Smrg		ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
220720131375Smrg		ringbuffer[ring_count++] = batch_buffer;
220820131375Smrg	}
220920131375Smrg
221020131375Smrg	/* Write out the ring.  This appears to trigger execution of
221120131375Smrg	 * the ring in the simulator.
221220131375Smrg	 */
221320131375Smrg	aub_out(bufmgr_gem,
221420131375Smrg		CMD_AUB_TRACE_HEADER_BLOCK |
221520131375Smrg		((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
221620131375Smrg	aub_out(bufmgr_gem,
221720131375Smrg		AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
221820131375Smrg	aub_out(bufmgr_gem, 0); /* general/surface subtype */
221920131375Smrg	aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
222020131375Smrg	aub_out(bufmgr_gem, ring_count * 4);
222120131375Smrg	if (bufmgr_gem->gen >= 8)
222220131375Smrg		aub_out(bufmgr_gem, 0);
222320131375Smrg
222420131375Smrg	/* FIXME: Need some flush operations here? */
222520131375Smrg	aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
222620131375Smrg
222720131375Smrg	/* Update offset pointer */
222820131375Smrg	bufmgr_gem->aub_offset += 4096;
222920131375Smrg}
223020131375Smrg
2231a884aba1Smrgdrm_public void
223220131375Smrgdrm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
223320131375Smrg			      int x1, int y1, int width, int height,
223420131375Smrg			      enum aub_dump_bmp_format format,
223520131375Smrg			      int pitch, int offset)
223620131375Smrg{
223720131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
223820131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
223920131375Smrg	uint32_t cpp;
224020131375Smrg
224120131375Smrg	switch (format) {
224220131375Smrg	case AUB_DUMP_BMP_FORMAT_8BIT:
224320131375Smrg		cpp = 1;
224420131375Smrg		break;
224520131375Smrg	case AUB_DUMP_BMP_FORMAT_ARGB_4444:
224620131375Smrg		cpp = 2;
224720131375Smrg		break;
224820131375Smrg	case AUB_DUMP_BMP_FORMAT_ARGB_0888:
224920131375Smrg	case AUB_DUMP_BMP_FORMAT_ARGB_8888:
225020131375Smrg		cpp = 4;
225120131375Smrg		break;
225220131375Smrg	default:
225320131375Smrg		printf("Unknown AUB dump format %d\n", format);
225420131375Smrg		return;
225520131375Smrg	}
225620131375Smrg
225720131375Smrg	if (!bufmgr_gem->aub_file)
225820131375Smrg		return;
225920131375Smrg
226020131375Smrg	aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
226120131375Smrg	aub_out(bufmgr_gem, (y1 << 16) | x1);
226220131375Smrg	aub_out(bufmgr_gem,
226320131375Smrg		(format << 24) |
226420131375Smrg		(cpp << 19) |
226520131375Smrg		pitch / 4);
226620131375Smrg	aub_out(bufmgr_gem, (height << 16) | width);
226720131375Smrg	aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
226820131375Smrg	aub_out(bufmgr_gem,
226920131375Smrg		((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
227020131375Smrg		((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
227120131375Smrg}
227220131375Smrg
227320131375Smrgstatic void
227420131375Smrgaub_exec(drm_intel_bo *bo, int ring_flag, int used)
227520131375Smrg{
227620131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
227720131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
227820131375Smrg	int i;
227920131375Smrg	bool batch_buffer_needs_annotations;
228020131375Smrg
228120131375Smrg	if (!bufmgr_gem->aub_file)
228220131375Smrg		return;
228320131375Smrg
228420131375Smrg	/* If batch buffer is not annotated, annotate it the best we
228520131375Smrg	 * can.
228620131375Smrg	 */
228720131375Smrg	batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
228820131375Smrg	if (batch_buffer_needs_annotations) {
228920131375Smrg		drm_intel_aub_annotation annotations[2] = {
229020131375Smrg			{ AUB_TRACE_TYPE_BATCH, 0, used },
229120131375Smrg			{ AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
229220131375Smrg		};
229320131375Smrg		drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
229420131375Smrg	}
229520131375Smrg
229620131375Smrg	/* Write out all buffers to AUB memory */
229720131375Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
229820131375Smrg		aub_write_bo(bufmgr_gem->exec_bos[i]);
229920131375Smrg	}
230020131375Smrg
230120131375Smrg	/* Remove any annotations we added */
230220131375Smrg	if (batch_buffer_needs_annotations)
230320131375Smrg		drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
230420131375Smrg
230520131375Smrg	/* Dump ring buffer */
230620131375Smrg	aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
230720131375Smrg
230820131375Smrg	fflush(bufmgr_gem->aub_file);
230920131375Smrg
231020131375Smrg	/*
231120131375Smrg	 * One frame has been dumped. So reset the aub_offset for the next frame.
231220131375Smrg	 *
231320131375Smrg	 * FIXME: Can we do this?
231420131375Smrg	 */
231520131375Smrg	bufmgr_gem->aub_offset = 0x10000;
231620131375Smrg}
231720131375Smrg
231820131375Smrgstatic int
231920131375Smrgdrm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
232020131375Smrg		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
232120131375Smrg{
232220131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
232320131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
232420131375Smrg	struct drm_i915_gem_execbuffer execbuf;
232520131375Smrg	int ret, i;
232620131375Smrg
232720131375Smrg	if (bo_gem->has_error)
232820131375Smrg		return -ENOMEM;
232920131375Smrg
233020131375Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
233120131375Smrg	/* Update indices and set up the validate list. */
233220131375Smrg	drm_intel_gem_bo_process_reloc(bo);
233320131375Smrg
233420131375Smrg	/* Add the batch buffer to the validation list.  There are no
233520131375Smrg	 * relocations pointing to it.
233620131375Smrg	 */
233720131375Smrg	drm_intel_add_validate_buffer(bo);
233820131375Smrg
233920131375Smrg	VG_CLEAR(execbuf);
234020131375Smrg	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
234120131375Smrg	execbuf.buffer_count = bufmgr_gem->exec_count;
234220131375Smrg	execbuf.batch_start_offset = 0;
234320131375Smrg	execbuf.batch_len = used;
234420131375Smrg	execbuf.cliprects_ptr = (uintptr_t) cliprects;
234520131375Smrg	execbuf.num_cliprects = num_cliprects;
234620131375Smrg	execbuf.DR1 = 0;
234720131375Smrg	execbuf.DR4 = DR4;
234820131375Smrg
234920131375Smrg	ret = drmIoctl(bufmgr_gem->fd,
235020131375Smrg		       DRM_IOCTL_I915_GEM_EXECBUFFER,
235120131375Smrg		       &execbuf);
235220131375Smrg	if (ret != 0) {
235320131375Smrg		ret = -errno;
235420131375Smrg		if (errno == ENOSPC) {
235520131375Smrg			DBG("Execbuffer fails to pin. "
235620131375Smrg			    "Estimate: %u. Actual: %u. Available: %u\n",
235720131375Smrg			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
235820131375Smrg							       bufmgr_gem->
235920131375Smrg							       exec_count),
236020131375Smrg			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
236120131375Smrg							      bufmgr_gem->
236220131375Smrg							      exec_count),
236320131375Smrg			    (unsigned int)bufmgr_gem->gtt_size);
236420131375Smrg		}
236520131375Smrg	}
236620131375Smrg	drm_intel_update_buffer_offsets(bufmgr_gem);
236720131375Smrg
236820131375Smrg	if (bufmgr_gem->bufmgr.debug)
236920131375Smrg		drm_intel_gem_dump_validation_list(bufmgr_gem);
237020131375Smrg
237120131375Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
237220131375Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
237320131375Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
237420131375Smrg
237520131375Smrg		bo_gem->idle = false;
237620131375Smrg
237720131375Smrg		/* Disconnect the buffer from the validate list */
237820131375Smrg		bo_gem->validate_index = -1;
237920131375Smrg		bufmgr_gem->exec_bos[i] = NULL;
238020131375Smrg	}
238120131375Smrg	bufmgr_gem->exec_count = 0;
238220131375Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
238320131375Smrg
238420131375Smrg	return ret;
238520131375Smrg}
238620131375Smrg
238720131375Smrgstatic int
238820131375Smrgdo_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
238920131375Smrg	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
239020131375Smrg	 unsigned int flags)
239120131375Smrg{
239220131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
239320131375Smrg	struct drm_i915_gem_execbuffer2 execbuf;
239420131375Smrg	int ret = 0;
239520131375Smrg	int i;
239620131375Smrg
239720131375Smrg	switch (flags & 0x7) {
239820131375Smrg	default:
239920131375Smrg		return -EINVAL;
240020131375Smrg	case I915_EXEC_BLT:
24019ce4edccSmrg		if (!bufmgr_gem->has_blt)
24029ce4edccSmrg			return -EINVAL;
24039ce4edccSmrg		break;
24049ce4edccSmrg	case I915_EXEC_BSD:
24059ce4edccSmrg		if (!bufmgr_gem->has_bsd)
24069ce4edccSmrg			return -EINVAL;
24079ce4edccSmrg		break;
240820131375Smrg	case I915_EXEC_VEBOX:
240920131375Smrg		if (!bufmgr_gem->has_vebox)
241020131375Smrg			return -EINVAL;
241120131375Smrg		break;
24129ce4edccSmrg	case I915_EXEC_RENDER:
24139ce4edccSmrg	case I915_EXEC_DEFAULT:
24149ce4edccSmrg		break;
24159ce4edccSmrg	}
2416aaba2545Smrg
241722944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
241822944501Smrg	/* Update indices and set up the validate list. */
241922944501Smrg	drm_intel_gem_bo_process_reloc2(bo);
242022944501Smrg
242122944501Smrg	/* Add the batch buffer to the validation list.  There are no relocations
242222944501Smrg	 * pointing to it.
242322944501Smrg	 */
242422944501Smrg	drm_intel_add_validate_buffer2(bo, 0);
242522944501Smrg
242620131375Smrg	VG_CLEAR(execbuf);
242722944501Smrg	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
242822944501Smrg	execbuf.buffer_count = bufmgr_gem->exec_count;
242922944501Smrg	execbuf.batch_start_offset = 0;
243022944501Smrg	execbuf.batch_len = used;
243122944501Smrg	execbuf.cliprects_ptr = (uintptr_t)cliprects;
243222944501Smrg	execbuf.num_cliprects = num_cliprects;
243322944501Smrg	execbuf.DR1 = 0;
243422944501Smrg	execbuf.DR4 = DR4;
243520131375Smrg	execbuf.flags = flags;
243620131375Smrg	if (ctx == NULL)
243720131375Smrg		i915_execbuffer2_set_context_id(execbuf, 0);
243820131375Smrg	else
243920131375Smrg		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
244022944501Smrg	execbuf.rsvd2 = 0;
244122944501Smrg
244220131375Smrg	aub_exec(bo, flags, used);
244320131375Smrg
244420131375Smrg	if (bufmgr_gem->no_exec)
244520131375Smrg		goto skip_execution;
244620131375Smrg
24476d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
24486d98c517Smrg		       DRM_IOCTL_I915_GEM_EXECBUFFER2,
24496d98c517Smrg		       &execbuf);
245022944501Smrg	if (ret != 0) {
245122944501Smrg		ret = -errno;
24526d98c517Smrg		if (ret == -ENOSPC) {
24539ce4edccSmrg			DBG("Execbuffer fails to pin. "
24549ce4edccSmrg			    "Estimate: %u. Actual: %u. Available: %u\n",
24559ce4edccSmrg			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
24569ce4edccSmrg							       bufmgr_gem->exec_count),
24579ce4edccSmrg			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
24589ce4edccSmrg							      bufmgr_gem->exec_count),
24599ce4edccSmrg			    (unsigned int) bufmgr_gem->gtt_size);
246022944501Smrg		}
246122944501Smrg	}
246222944501Smrg	drm_intel_update_buffer_offsets2(bufmgr_gem);
246322944501Smrg
246420131375Smrgskip_execution:
246522944501Smrg	if (bufmgr_gem->bufmgr.debug)
246622944501Smrg		drm_intel_gem_dump_validation_list(bufmgr_gem);
246722944501Smrg
246822944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
246922944501Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
247022944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
247122944501Smrg
247220131375Smrg		bo_gem->idle = false;
247320131375Smrg
247422944501Smrg		/* Disconnect the buffer from the validate list */
247522944501Smrg		bo_gem->validate_index = -1;
247622944501Smrg		bufmgr_gem->exec_bos[i] = NULL;
247722944501Smrg	}
247822944501Smrg	bufmgr_gem->exec_count = 0;
247922944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
248022944501Smrg
248122944501Smrg	return ret;
248222944501Smrg}
248322944501Smrg
2484aaba2545Smrgstatic int
2485aaba2545Smrgdrm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2486aaba2545Smrg		       drm_clip_rect_t *cliprects, int num_cliprects,
2487aaba2545Smrg		       int DR4)
2488aaba2545Smrg{
248920131375Smrg	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
249020131375Smrg			I915_EXEC_RENDER);
249120131375Smrg}
249220131375Smrg
249320131375Smrgstatic int
249420131375Smrgdrm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
249520131375Smrg			drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
249620131375Smrg			unsigned int flags)
249720131375Smrg{
249820131375Smrg	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
249920131375Smrg			flags);
250020131375Smrg}
250120131375Smrg
2502a884aba1Smrgdrm_public int
250320131375Smrgdrm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
250420131375Smrg			      int used, unsigned int flags)
250520131375Smrg{
250620131375Smrg	return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
2507aaba2545Smrg}
2508aaba2545Smrg
250922944501Smrgstatic int
251022944501Smrgdrm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
251122944501Smrg{
251222944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
251322944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
251422944501Smrg	struct drm_i915_gem_pin pin;
251522944501Smrg	int ret;
251622944501Smrg
251720131375Smrg	VG_CLEAR(pin);
251822944501Smrg	pin.handle = bo_gem->gem_handle;
251922944501Smrg	pin.alignment = alignment;
252022944501Smrg
25216d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
25226d98c517Smrg		       DRM_IOCTL_I915_GEM_PIN,
25236d98c517Smrg		       &pin);
252422944501Smrg	if (ret != 0)
252522944501Smrg		return -errno;
252622944501Smrg
252720131375Smrg	bo->offset64 = pin.offset;
252822944501Smrg	bo->offset = pin.offset;
252922944501Smrg	return 0;
253022944501Smrg}
253122944501Smrg
253222944501Smrgstatic int
253322944501Smrgdrm_intel_gem_bo_unpin(drm_intel_bo *bo)
253422944501Smrg{
253522944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
253622944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
253722944501Smrg	struct drm_i915_gem_unpin unpin;
253822944501Smrg	int ret;
253922944501Smrg
254020131375Smrg	VG_CLEAR(unpin);
254122944501Smrg	unpin.handle = bo_gem->gem_handle;
254222944501Smrg
25436d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
254422944501Smrg	if (ret != 0)
254522944501Smrg		return -errno;
254622944501Smrg
254722944501Smrg	return 0;
254822944501Smrg}
254922944501Smrg
255022944501Smrgstatic int
25516d98c517Smrgdrm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
25526d98c517Smrg				     uint32_t tiling_mode,
25536d98c517Smrg				     uint32_t stride)
255422944501Smrg{
255522944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
255622944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
255722944501Smrg	struct drm_i915_gem_set_tiling set_tiling;
255822944501Smrg	int ret;
255922944501Smrg
25606d98c517Smrg	if (bo_gem->global_name == 0 &&
25616d98c517Smrg	    tiling_mode == bo_gem->tiling_mode &&
25626d98c517Smrg	    stride == bo_gem->stride)
256322944501Smrg		return 0;
256422944501Smrg
256522944501Smrg	memset(&set_tiling, 0, sizeof(set_tiling));
256622944501Smrg	do {
25676d98c517Smrg		/* set_tiling is slightly broken and overwrites the
25686d98c517Smrg		 * input on the error path, so we have to open code
25696d98c517Smrg		 * drmIoctl.
25706d98c517Smrg		 */
25716d98c517Smrg		set_tiling.handle = bo_gem->gem_handle;
25726d98c517Smrg		set_tiling.tiling_mode = tiling_mode;
257322944501Smrg		set_tiling.stride = stride;
257422944501Smrg
257522944501Smrg		ret = ioctl(bufmgr_gem->fd,
257622944501Smrg			    DRM_IOCTL_I915_GEM_SET_TILING,
257722944501Smrg			    &set_tiling);
25786d98c517Smrg	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
25796d98c517Smrg	if (ret == -1)
25806d98c517Smrg		return -errno;
25816d98c517Smrg
25826d98c517Smrg	bo_gem->tiling_mode = set_tiling.tiling_mode;
25836d98c517Smrg	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
25846d98c517Smrg	bo_gem->stride = set_tiling.stride;
25856d98c517Smrg	return 0;
25866d98c517Smrg}
25876d98c517Smrg
25886d98c517Smrgstatic int
25896d98c517Smrgdrm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
25906d98c517Smrg			    uint32_t stride)
25916d98c517Smrg{
25926d98c517Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
25936d98c517Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
25946d98c517Smrg	int ret;
25956d98c517Smrg
2596a884aba1Smrg	/* Tiling with userptr surfaces is not supported
2597a884aba1Smrg	 * on all hardware so refuse it for the time being.
2598a884aba1Smrg	 */
2599a884aba1Smrg	if (bo_gem->is_userptr)
2600a884aba1Smrg		return -EINVAL;
2601a884aba1Smrg
26026d98c517Smrg	/* Linear buffers have no stride. By ensuring that we only ever use
26036d98c517Smrg	 * stride 0 with linear buffers, we simplify our code.
26046d98c517Smrg	 */
26056d98c517Smrg	if (*tiling_mode == I915_TILING_NONE)
26066d98c517Smrg		stride = 0;
26076d98c517Smrg
26086d98c517Smrg	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
26096d98c517Smrg	if (ret == 0)
2610aaba2545Smrg		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
261122944501Smrg
261222944501Smrg	*tiling_mode = bo_gem->tiling_mode;
2613aaba2545Smrg	return ret;
261422944501Smrg}
261522944501Smrg
261622944501Smrgstatic int
261722944501Smrgdrm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
261822944501Smrg			    uint32_t * swizzle_mode)
261922944501Smrg{
262022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
262122944501Smrg
262222944501Smrg	*tiling_mode = bo_gem->tiling_mode;
262322944501Smrg	*swizzle_mode = bo_gem->swizzle_mode;
262422944501Smrg	return 0;
262522944501Smrg}
262622944501Smrg
2627a884aba1Smrgdrm_public drm_intel_bo *
262820131375Smrgdrm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
262920131375Smrg{
263020131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
263120131375Smrg	int ret;
263220131375Smrg	uint32_t handle;
263320131375Smrg	drm_intel_bo_gem *bo_gem;
263420131375Smrg	struct drm_i915_gem_get_tiling get_tiling;
263520131375Smrg	drmMMListHead *list;
263620131375Smrg
263720131375Smrg	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
263820131375Smrg
263920131375Smrg	/*
264020131375Smrg	 * See if the kernel has already returned this buffer to us. Just as
264120131375Smrg	 * for named buffers, we must not create two bo's pointing at the same
264220131375Smrg	 * kernel object
264320131375Smrg	 */
2644a884aba1Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
264520131375Smrg	for (list = bufmgr_gem->named.next;
264620131375Smrg	     list != &bufmgr_gem->named;
264720131375Smrg	     list = list->next) {
264820131375Smrg		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
264920131375Smrg		if (bo_gem->gem_handle == handle) {
265020131375Smrg			drm_intel_gem_bo_reference(&bo_gem->bo);
2651a884aba1Smrg			pthread_mutex_unlock(&bufmgr_gem->lock);
265220131375Smrg			return &bo_gem->bo;
265320131375Smrg		}
265420131375Smrg	}
265520131375Smrg
265620131375Smrg	if (ret) {
265720131375Smrg		fprintf(stderr, "drmPrimeFDToHandle failed: %d (errno %d)\n", ret, errno);
2658a884aba1Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
265920131375Smrg		return NULL;
266020131375Smrg	}
266120131375Smrg
266220131375Smrg	bo_gem = calloc(1, sizeof(*bo_gem));
2663a884aba1Smrg	if (!bo_gem) {
2664a884aba1Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
266520131375Smrg		return NULL;
2666a884aba1Smrg	}
266720131375Smrg	/* Determine size of bo.  The fd-to-handle ioctl really should
266820131375Smrg	 * return the size, but it doesn't.  If we have kernel 3.12 or
266920131375Smrg	 * later, we can lseek on the prime fd to get the size.  Older
267020131375Smrg	 * kernels will just fail, in which case we fall back to the
267120131375Smrg	 * provided (estimated or guessed) size. */
267220131375Smrg	ret = lseek(prime_fd, 0, SEEK_END);
267320131375Smrg	if (ret != -1)
267420131375Smrg		bo_gem->bo.size = ret;
267520131375Smrg	else
267620131375Smrg		bo_gem->bo.size = size;
267720131375Smrg
267820131375Smrg	bo_gem->bo.handle = handle;
267920131375Smrg	bo_gem->bo.bufmgr = bufmgr;
268020131375Smrg
268120131375Smrg	bo_gem->gem_handle = handle;
268220131375Smrg
268320131375Smrg	atomic_set(&bo_gem->refcount, 1);
268420131375Smrg
268520131375Smrg	bo_gem->name = "prime";
268620131375Smrg	bo_gem->validate_index = -1;
268720131375Smrg	bo_gem->reloc_tree_fences = 0;
268820131375Smrg	bo_gem->used_as_reloc_target = false;
268920131375Smrg	bo_gem->has_error = false;
269020131375Smrg	bo_gem->reusable = false;
269120131375Smrg
269220131375Smrg	DRMINITLISTHEAD(&bo_gem->vma_list);
269320131375Smrg	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2694a884aba1Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
269520131375Smrg
269620131375Smrg	VG_CLEAR(get_tiling);
269720131375Smrg	get_tiling.handle = bo_gem->gem_handle;
269820131375Smrg	ret = drmIoctl(bufmgr_gem->fd,
269920131375Smrg		       DRM_IOCTL_I915_GEM_GET_TILING,
270020131375Smrg		       &get_tiling);
270120131375Smrg	if (ret != 0) {
270220131375Smrg		drm_intel_gem_bo_unreference(&bo_gem->bo);
270320131375Smrg		return NULL;
270420131375Smrg	}
270520131375Smrg	bo_gem->tiling_mode = get_tiling.tiling_mode;
270620131375Smrg	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
270720131375Smrg	/* XXX stride is unknown */
270820131375Smrg	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
270920131375Smrg
271020131375Smrg	return &bo_gem->bo;
271120131375Smrg}
271220131375Smrg
2713a884aba1Smrgdrm_public int
271420131375Smrgdrm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
271520131375Smrg{
271620131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
271720131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
271820131375Smrg
2719a884aba1Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
272020131375Smrg	if (DRMLISTEMPTY(&bo_gem->name_list))
272120131375Smrg		DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2722a884aba1Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
272320131375Smrg
272420131375Smrg	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
272520131375Smrg			       DRM_CLOEXEC, prime_fd) != 0)
272620131375Smrg		return -errno;
272720131375Smrg
272820131375Smrg	bo_gem->reusable = false;
272920131375Smrg
273020131375Smrg	return 0;
273120131375Smrg}
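/*
 * Usage sketch (illustrative only): sharing a buffer across processes or
 * drivers via PRIME file descriptors, using the two entry points above.  The
 * size argument on import is only a fallback for kernels that cannot report
 * the size through lseek(2), as noted in the import path:
 *
 *	int fd;
 *
 *	if (drm_intel_bo_gem_export_to_prime(bo, &fd) == 0) {
 *		// pass fd to the importing side (e.g. over a unix socket) ...
 *		drm_intel_bo *imported =
 *			drm_intel_bo_gem_create_from_prime(bufmgr, fd, bo->size);
 *		// ... use imported, then drop the reference:
 *		drm_intel_bo_unreference(imported);
 *	}
 */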
273220131375Smrg
273322944501Smrgstatic int
273422944501Smrgdrm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
273522944501Smrg{
273622944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
273722944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
273822944501Smrg	int ret;
273922944501Smrg
274022944501Smrg	if (!bo_gem->global_name) {
274120131375Smrg		struct drm_gem_flink flink;
274220131375Smrg
274320131375Smrg		VG_CLEAR(flink);
274422944501Smrg		flink.handle = bo_gem->gem_handle;
274522944501Smrg
2746a884aba1Smrg		pthread_mutex_lock(&bufmgr_gem->lock);
2747a884aba1Smrg
27486d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
2749a884aba1Smrg		if (ret != 0) {
2750a884aba1Smrg			pthread_mutex_unlock(&bufmgr_gem->lock);
275122944501Smrg			return -errno;
2752a884aba1Smrg		}
275320131375Smrg
275422944501Smrg		bo_gem->global_name = flink.name;
275520131375Smrg		bo_gem->reusable = false;
275620131375Smrg
275720131375Smrg		if (DRMLISTEMPTY(&bo_gem->name_list))
275820131375Smrg			DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2759a884aba1Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
276022944501Smrg	}
276122944501Smrg
276222944501Smrg	*name = bo_gem->global_name;
276322944501Smrg	return 0;
276422944501Smrg}
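/*
 * Usage sketch (illustrative only): flink names are the legacy, global way of
 * sharing buffers (e.g. with DRI2).  The exporter publishes the name and any
 * client of the same device can then open it:
 *
 *	uint32_t name;
 *
 *	if (drm_intel_bo_flink(bo, &name) == 0) {
 *		// in the importing client:
 *		drm_intel_bo *shared =
 *			drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 *	}
 *
 * Unlike PRIME fds, flink names are guessable and visible to every client of
 * the device, so new code generally prefers the prime export path above.
 */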
276522944501Smrg
276622944501Smrg/**
276722944501Smrg * Enables unlimited caching of buffer objects for reuse.
276822944501Smrg *
276922944501Smrg * This is potentially very memory expensive, as the cache at each bucket
277022944501Smrg * size is only bounded by how many buffers of that size we've managed to have
277122944501Smrg * in flight at once.
277222944501Smrg */
2773a884aba1Smrgdrm_public void
277422944501Smrgdrm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
277522944501Smrg{
277622944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
277722944501Smrg
277820131375Smrg	bufmgr_gem->bo_reuse = true;
277922944501Smrg}
278022944501Smrg
278122944501Smrg/**
278222944501Smrg * Enable use of fenced reloc type.
278322944501Smrg *
278422944501Smrg * New code should enable this to avoid unnecessary fence register
278522944501Smrg * allocation.  If this option is not enabled, all relocs will have fence
278622944501Smrg * allocation.  If this option is not enabled, all relocs will have a fence
278722944501Smrg */
2788a884aba1Smrgdrm_public void
278922944501Smrgdrm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
279022944501Smrg{
279122944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
279222944501Smrg
279322944501Smrg	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
279420131375Smrg		bufmgr_gem->fenced_relocs = true;
279522944501Smrg}
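/*
 * Usage sketch (illustrative only): both knobs above are normally flipped
 * once, right after creating the buffer manager and before any allocations:
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
 */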
279622944501Smrg
279722944501Smrg/**
279822944501Smrg * Return the additional aperture space required by the tree of buffer objects
279922944501Smrg * rooted at bo.
280022944501Smrg */
280122944501Smrgstatic int
280222944501Smrgdrm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
280322944501Smrg{
280422944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
280522944501Smrg	int i;
280622944501Smrg	int total = 0;
280722944501Smrg
280822944501Smrg	if (bo == NULL || bo_gem->included_in_check_aperture)
280922944501Smrg		return 0;
281022944501Smrg
281122944501Smrg	total += bo->size;
281220131375Smrg	bo_gem->included_in_check_aperture = true;
281322944501Smrg
281422944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++)
281522944501Smrg		total +=
281622944501Smrg		    drm_intel_gem_bo_get_aperture_space(bo_gem->
281722944501Smrg							reloc_target_info[i].bo);
281822944501Smrg
281922944501Smrg	return total;
282022944501Smrg}
282122944501Smrg
282222944501Smrg/**
282322944501Smrg * Count the number of buffers in this list that need a fence reg
282422944501Smrg *
282522944501Smrg * If the count is greater than the number of available regs, we'll have
282622944501Smrg * to ask the caller to resubmit a batch with fewer tiled buffers.
282722944501Smrg *
282822944501Smrg * This function over-counts if the same buffer is used multiple times.
282922944501Smrg */
283022944501Smrgstatic unsigned int
283122944501Smrgdrm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
283222944501Smrg{
283322944501Smrg	int i;
283422944501Smrg	unsigned int total = 0;
283522944501Smrg
283622944501Smrg	for (i = 0; i < count; i++) {
283722944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
283822944501Smrg
283922944501Smrg		if (bo_gem == NULL)
284022944501Smrg			continue;
284122944501Smrg
284222944501Smrg		total += bo_gem->reloc_tree_fences;
284322944501Smrg	}
284422944501Smrg	return total;
284522944501Smrg}
284622944501Smrg
284722944501Smrg/**
284822944501Smrg * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
284922944501Smrg * for the next drm_intel_bufmgr_check_aperture_space() call.
285022944501Smrg */
285122944501Smrgstatic void
285222944501Smrgdrm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
285322944501Smrg{
285422944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
285522944501Smrg	int i;
285622944501Smrg
285722944501Smrg	if (bo == NULL || !bo_gem->included_in_check_aperture)
285822944501Smrg		return;
285922944501Smrg
286020131375Smrg	bo_gem->included_in_check_aperture = false;
286122944501Smrg
286222944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++)
286322944501Smrg		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
286422944501Smrg							   reloc_target_info[i].bo);
286522944501Smrg}
286622944501Smrg
286722944501Smrg/**
286822944501Smrg * Return a conservative estimate for the amount of aperture required
286922944501Smrg * for a collection of buffers. This may double-count some buffers.
287022944501Smrg */
287122944501Smrgstatic unsigned int
287222944501Smrgdrm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
287322944501Smrg{
287422944501Smrg	int i;
287522944501Smrg	unsigned int total = 0;
287622944501Smrg
287722944501Smrg	for (i = 0; i < count; i++) {
287822944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
287922944501Smrg		if (bo_gem != NULL)
288022944501Smrg			total += bo_gem->reloc_tree_size;
288122944501Smrg	}
288222944501Smrg	return total;
288322944501Smrg}
288422944501Smrg
288522944501Smrg/**
288622944501Smrg * Return the amount of aperture needed for a collection of buffers.
288722944501Smrg * This avoids double counting any buffers, at the cost of looking
288822944501Smrg * at every buffer in the set.
288922944501Smrg */
289022944501Smrgstatic unsigned int
289122944501Smrgdrm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
289222944501Smrg{
289322944501Smrg	int i;
289422944501Smrg	unsigned int total = 0;
289522944501Smrg
289622944501Smrg	for (i = 0; i < count; i++) {
289722944501Smrg		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
289822944501Smrg		/* For the first buffer object in the array, we get an
289922944501Smrg		 * accurate count back for its reloc_tree size (since nothing
290022944501Smrg		 * had been flagged as being counted yet).  We can save that
290122944501Smrg		 * value out as a more conservative reloc_tree_size that
290222944501Smrg		 * avoids double-counting target buffers.  Since the first
290322944501Smrg		 * buffer happens to usually be the batch buffer in our
290422944501Smrg		 * callers, this can pull us back from doing the tree
290522944501Smrg		 * walk on every new batch emit.
290622944501Smrg		 */
290722944501Smrg		if (i == 0) {
290822944501Smrg			drm_intel_bo_gem *bo_gem =
290922944501Smrg			    (drm_intel_bo_gem *) bo_array[i];
291022944501Smrg			bo_gem->reloc_tree_size = total;
291122944501Smrg		}
291222944501Smrg	}
291322944501Smrg
291422944501Smrg	for (i = 0; i < count; i++)
291522944501Smrg		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
291622944501Smrg	return total;
291722944501Smrg}
291822944501Smrg
291922944501Smrg/**
292022944501Smrg * Return -1 if the batchbuffer should be flushed before attempting to
292122944501Smrg * emit rendering referencing the buffers pointed to by bo_array.
292222944501Smrg *
292322944501Smrg * This is required because if we try to emit a batchbuffer with relocations
292422944501Smrg * to a tree of buffers that won't simultaneously fit in the aperture,
292522944501Smrg * the rendering will return an error at a point where the software is not
292622944501Smrg * prepared to recover from it.
292722944501Smrg *
292822944501Smrg * However, we also want to emit the batchbuffer significantly before we reach
292922944501Smrg * the limit, as a series of batchbuffers each of which references buffers
293022944501Smrg * covering almost all of the aperture means that at each emit we end up
293122944501Smrg * waiting to evict a buffer from the last rendering, and we get synchronous
293222944501Smrg * waiting to evict a buffer from the last rendering, making execution
293322944501Smrg * effectively synchronous.  By emitting smaller batchbuffers, we eat some CPU overhead to
293422944501Smrg */
293522944501Smrgstatic int
293622944501Smrgdrm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
293722944501Smrg{
293822944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem =
293922944501Smrg	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
294022944501Smrg	unsigned int total = 0;
294122944501Smrg	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
294222944501Smrg	int total_fences;
294322944501Smrg
294422944501Smrg	/* Check for fence reg constraints if necessary */
294522944501Smrg	if (bufmgr_gem->available_fences) {
294622944501Smrg		total_fences = drm_intel_gem_total_fences(bo_array, count);
294722944501Smrg		if (total_fences > bufmgr_gem->available_fences)
294822944501Smrg			return -ENOSPC;
294922944501Smrg	}
295022944501Smrg
295122944501Smrg	total = drm_intel_gem_estimate_batch_space(bo_array, count);
295222944501Smrg
295322944501Smrg	if (total > threshold)
295422944501Smrg		total = drm_intel_gem_compute_batch_space(bo_array, count);
295522944501Smrg
295622944501Smrg	if (total > threshold) {
295722944501Smrg		DBG("check_space: overflowed available aperture, "
295822944501Smrg		    "%dkb vs %dkb\n",
295922944501Smrg		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
296022944501Smrg		return -ENOSPC;
296122944501Smrg	} else {
296222944501Smrg		DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
296322944501Smrg		    (int)bufmgr_gem->gtt_size / 1024);
296422944501Smrg		return 0;
296522944501Smrg	}
296622944501Smrg}
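/*
 * Usage sketch (illustrative only): batchbuffer writers call the public
 * wrapper drm_intel_bufmgr_check_aperture_space() before adding another
 * relocation target, and flush early once the working set no longer fits.
 * batch_bo and new_target_bo are placeholders for the caller's buffers:
 *
 *	drm_intel_bo *check[] = { batch_bo, new_target_bo };
 *
 *	if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0) {
 *		// flush the current batch, then retry with the new target
 *	}
 */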
296722944501Smrg
296822944501Smrg/*
296922944501Smrg * Disable buffer reuse for objects which are shared with the kernel
297022944501Smrg * as scanout buffers
297122944501Smrg */
297222944501Smrgstatic int
297322944501Smrgdrm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
297422944501Smrg{
297522944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
297622944501Smrg
297720131375Smrg	bo_gem->reusable = false;
297822944501Smrg	return 0;
297922944501Smrg}
298022944501Smrg
2981aaba2545Smrgstatic int
2982aaba2545Smrgdrm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2983aaba2545Smrg{
2984aaba2545Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2985aaba2545Smrg
2986aaba2545Smrg	return bo_gem->reusable;
2987aaba2545Smrg}
2988aaba2545Smrg
298922944501Smrgstatic int
299022944501Smrg_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
299122944501Smrg{
299222944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
299322944501Smrg	int i;
299422944501Smrg
299522944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++) {
299622944501Smrg		if (bo_gem->reloc_target_info[i].bo == target_bo)
299722944501Smrg			return 1;
2998aaba2545Smrg		if (bo == bo_gem->reloc_target_info[i].bo)
2999aaba2545Smrg			continue;
300022944501Smrg		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
300122944501Smrg						target_bo))
300222944501Smrg			return 1;
300322944501Smrg	}
300422944501Smrg
300522944501Smrg	return 0;
300622944501Smrg}
300722944501Smrg
300822944501Smrg/** Return true if target_bo is referenced by bo's relocation tree. */
300922944501Smrgstatic int
301022944501Smrgdrm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
301122944501Smrg{
301222944501Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
301322944501Smrg
301422944501Smrg	if (bo == NULL || target_bo == NULL)
301522944501Smrg		return 0;
301622944501Smrg	if (target_bo_gem->used_as_reloc_target)
301722944501Smrg		return _drm_intel_gem_bo_references(bo, target_bo);
301822944501Smrg	return 0;
301922944501Smrg}
302022944501Smrg
3021aaba2545Smrgstatic void
3022aaba2545Smrgadd_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
3023aaba2545Smrg{
3024aaba2545Smrg	unsigned int i = bufmgr_gem->num_buckets;
3025aaba2545Smrg
3026aaba2545Smrg	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
3027aaba2545Smrg
3028aaba2545Smrg	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
3029aaba2545Smrg	bufmgr_gem->cache_bucket[i].size = size;
3030aaba2545Smrg	bufmgr_gem->num_buckets++;
3031aaba2545Smrg}
3032aaba2545Smrg
3033aaba2545Smrgstatic void
3034aaba2545Smrginit_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
3035aaba2545Smrg{
3036aaba2545Smrg	unsigned long size, cache_max_size = 64 * 1024 * 1024;
3037aaba2545Smrg
3038aaba2545Smrg	/* OK, so power of two buckets was too wasteful of memory.
3039aaba2545Smrg	 * Give 3 other sizes between each power of two, to hopefully
3040aaba2545Smrg	 * cover things accurately enough.  (The alternative is
3041aaba2545Smrg	 * probably to just go for exact matching of sizes, and assume
3042aaba2545Smrg	 * that for things like composited window resize the tiled
3043aaba2545Smrg	 * width/height alignment and rounding of sizes to pages will
3044aaba2545Smrg	 * get us useful cache hit rates anyway)
3045aaba2545Smrg	 */
3046aaba2545Smrg	add_bucket(bufmgr_gem, 4096);
3047aaba2545Smrg	add_bucket(bufmgr_gem, 4096 * 2);
3048aaba2545Smrg	add_bucket(bufmgr_gem, 4096 * 3);
3049aaba2545Smrg
3050aaba2545Smrg	/* Initialize the linked lists for BO reuse cache. */
3051aaba2545Smrg	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
3052aaba2545Smrg		add_bucket(bufmgr_gem, size);
3053aaba2545Smrg
3054aaba2545Smrg		add_bucket(bufmgr_gem, size + size * 1 / 4);
3055aaba2545Smrg		add_bucket(bufmgr_gem, size + size * 2 / 4);
3056aaba2545Smrg		add_bucket(bufmgr_gem, size + size * 3 / 4);
3057aaba2545Smrg	}
3058aaba2545Smrg}
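/*
 * For reference, the loop above yields buckets at 4KB, 8KB and 12KB, and then
 * for every power of two from 16KB up to 64MB the power itself plus 1.25x,
 * 1.5x and 1.75x of it:
 *
 *	16KB, 20KB, 24KB, 28KB, 32KB, 40KB, 48KB, 56KB, 64KB, 80KB, ...
 *
 * i.e. the "3 other sizes between each power of two" scheme described in the
 * comment above.
 */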
3059aaba2545Smrg
3060a884aba1Smrgdrm_public void
306120131375Smrgdrm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
306220131375Smrg{
306320131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
306420131375Smrg
306520131375Smrg	bufmgr_gem->vma_max = limit;
306620131375Smrg
306720131375Smrg	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
306820131375Smrg}
306920131375Smrg
307020131375Smrg/**
307120131375Smrg * Get the PCI ID for the device.  This can be overridden by setting the
307220131375Smrg * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
307320131375Smrg */
307420131375Smrgstatic int
307520131375Smrgget_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
307620131375Smrg{
307720131375Smrg	char *devid_override;
307820131375Smrg	int devid;
307920131375Smrg	int ret;
308020131375Smrg	drm_i915_getparam_t gp;
308120131375Smrg
308220131375Smrg	if (geteuid() == getuid()) {
308320131375Smrg		devid_override = getenv("INTEL_DEVID_OVERRIDE");
308420131375Smrg		if (devid_override) {
308520131375Smrg			bufmgr_gem->no_exec = true;
308620131375Smrg			return strtod(devid_override, NULL);
308720131375Smrg		}
308820131375Smrg	}
308920131375Smrg
309020131375Smrg	VG_CLEAR(devid);
309120131375Smrg	VG_CLEAR(gp);
309220131375Smrg	gp.param = I915_PARAM_CHIPSET_ID;
309320131375Smrg	gp.value = &devid;
309420131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
309520131375Smrg	if (ret) {
309620131375Smrg		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
309720131375Smrg		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
309820131375Smrg	}
309920131375Smrg	return devid;
310020131375Smrg}
310120131375Smrg
3102a884aba1Smrgdrm_public int
310320131375Smrgdrm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
310420131375Smrg{
310520131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
310620131375Smrg
310720131375Smrg	return bufmgr_gem->pci_device;
310820131375Smrg}
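/*
 * Usage sketch (illustrative only): the cached PCI ID lets callers make their
 * own generation checks without another GETPARAM round trip:
 *
 *	int devid = drm_intel_bufmgr_gem_get_devid(bufmgr);
 *	printf("running on PCI device 0x%04x\n", devid);
 *
 * When INTEL_DEVID_OVERRIDE is set (and the process is not setuid), the value
 * reflects the override and, as get_pci_device_id() shows, the buffer manager
 * also switches into no-exec mode.
 */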
310920131375Smrg
311020131375Smrg/**
311120131375Smrg * Sets the AUB filename.
311220131375Smrg *
311320131375Smrg * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
311420131375Smrg * for it to have any effect.
311520131375Smrg */
3116a884aba1Smrgdrm_public void
311720131375Smrgdrm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
311820131375Smrg				      const char *filename)
311920131375Smrg{
312020131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
312120131375Smrg
312220131375Smrg	free(bufmgr_gem->aub_filename);
312320131375Smrg	if (filename)
312420131375Smrg		bufmgr_gem->aub_filename = strdup(filename);
312520131375Smrg}
312620131375Smrg
312720131375Smrg/**
312820131375Smrg * Sets up AUB dumping.
312920131375Smrg *
313020131375Smrg * This is a trace file format that can be used with the simulator.
313120131375Smrg * Packets are emitted in a format somewhat like GPU command packets.
313220131375Smrg * You can set up a GTT and upload your objects into the referenced
313320131375Smrg * space, then send off batchbuffers and get BMPs out the other end.
313420131375Smrg */
3135a884aba1Smrgdrm_public void
313620131375Smrgdrm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
313720131375Smrg{
313820131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
313920131375Smrg	int entry = 0x200003;
314020131375Smrg	int i;
314120131375Smrg	int gtt_size = 0x10000;
314220131375Smrg	const char *filename;
314320131375Smrg
314420131375Smrg	if (!enable) {
314520131375Smrg		if (bufmgr_gem->aub_file) {
314620131375Smrg			fclose(bufmgr_gem->aub_file);
314720131375Smrg			bufmgr_gem->aub_file = NULL;
314820131375Smrg		}
314920131375Smrg		return;
315020131375Smrg	}
315120131375Smrg
315220131375Smrg	if (geteuid() != getuid())
315320131375Smrg		return;
315420131375Smrg
315520131375Smrg	if (bufmgr_gem->aub_filename)
315620131375Smrg		filename = bufmgr_gem->aub_filename;
315720131375Smrg	else
315820131375Smrg		filename = "intel.aub";
315920131375Smrg	bufmgr_gem->aub_file = fopen(filename, "w+");
316020131375Smrg	if (!bufmgr_gem->aub_file)
316120131375Smrg		return;
316220131375Smrg
316320131375Smrg	/* Start allocating objects from just after the GTT. */
316420131375Smrg	bufmgr_gem->aub_offset = gtt_size;
316520131375Smrg
316620131375Smrg	/* Start with a (required) version packet. */
316720131375Smrg	aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
316820131375Smrg	aub_out(bufmgr_gem,
316920131375Smrg		(4 << AUB_HEADER_MAJOR_SHIFT) |
317020131375Smrg		(0 << AUB_HEADER_MINOR_SHIFT));
317120131375Smrg	for (i = 0; i < 8; i++) {
317220131375Smrg		aub_out(bufmgr_gem, 0); /* app name */
317320131375Smrg	}
317420131375Smrg	aub_out(bufmgr_gem, 0); /* timestamp */
317520131375Smrg	aub_out(bufmgr_gem, 0); /* timestamp */
317620131375Smrg	aub_out(bufmgr_gem, 0); /* comment len */
317720131375Smrg
317820131375Smrg	/* Set up the GTT. The max we can handle is 256M */
317920131375Smrg	aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
318020131375Smrg	aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
318120131375Smrg	aub_out(bufmgr_gem, 0); /* subtype */
318220131375Smrg	aub_out(bufmgr_gem, 0); /* offset */
318320131375Smrg	aub_out(bufmgr_gem, gtt_size); /* size */
318420131375Smrg	if (bufmgr_gem->gen >= 8)
318520131375Smrg		aub_out(bufmgr_gem, 0);
318620131375Smrg	for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
318720131375Smrg		aub_out(bufmgr_gem, entry);
318820131375Smrg	}
318920131375Smrg}
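/*
 * Usage sketch (illustrative only): as documented above, the filename has to
 * be set before dumping is enabled, otherwise the default "intel.aub" is
 * used:
 *
 *	drm_intel_bufmgr_gem_set_aub_filename(bufmgr, "trace.aub");
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
 *	// ... submit batches as usual ...
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);	// closes the file
 */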
319020131375Smrg
3191a884aba1Smrgdrm_public drm_intel_context *
319220131375Smrgdrm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
319320131375Smrg{
319420131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
319520131375Smrg	struct drm_i915_gem_context_create create;
319620131375Smrg	drm_intel_context *context = NULL;
319720131375Smrg	int ret;
319820131375Smrg
319920131375Smrg	context = calloc(1, sizeof(*context));
320020131375Smrg	if (!context)
320120131375Smrg		return NULL;
320220131375Smrg
320320131375Smrg	VG_CLEAR(create);
320420131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
320520131375Smrg	if (ret != 0) {
320620131375Smrg		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
320720131375Smrg		    strerror(errno));
320820131375Smrg		free(context);
320920131375Smrg		return NULL;
321020131375Smrg	}
321120131375Smrg
321220131375Smrg	context->ctx_id = create.ctx_id;
321320131375Smrg	context->bufmgr = bufmgr;
321420131375Smrg
321520131375Smrg	return context;
321620131375Smrg}
321720131375Smrg
3218a884aba1Smrgdrm_public void
321920131375Smrgdrm_intel_gem_context_destroy(drm_intel_context *ctx)
322020131375Smrg{
322120131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
322220131375Smrg	struct drm_i915_gem_context_destroy destroy;
322320131375Smrg	int ret;
322420131375Smrg
322520131375Smrg	if (ctx == NULL)
322620131375Smrg		return;
322720131375Smrg
322820131375Smrg	VG_CLEAR(destroy);
322920131375Smrg
323020131375Smrg	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
323120131375Smrg	destroy.ctx_id = ctx->ctx_id;
323220131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
323320131375Smrg		       &destroy);
323420131375Smrg	if (ret != 0)
323520131375Smrg		fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
323620131375Smrg			strerror(errno));
323720131375Smrg
323820131375Smrg	free(ctx);
323920131375Smrg}
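/*
 * Usage sketch (illustrative only): hardware contexts give each client its
 * own GPU state; batches are then submitted through the context-aware exec
 * entry point defined earlier in this file.  batch_bo and used_bytes are
 * placeholders for the caller's batch state:
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *
 *	if (ctx) {
 *		drm_intel_gem_bo_context_exec(batch_bo, ctx, used_bytes,
 *					      I915_EXEC_RENDER);
 *		drm_intel_gem_context_destroy(ctx);
 *	}
 */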
324020131375Smrg
3241a884aba1Smrgdrm_public int
324220131375Smrgdrm_intel_get_reset_stats(drm_intel_context *ctx,
324320131375Smrg			  uint32_t *reset_count,
324420131375Smrg			  uint32_t *active,
324520131375Smrg			  uint32_t *pending)
324620131375Smrg{
324720131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
324820131375Smrg	struct drm_i915_reset_stats stats;
324920131375Smrg	int ret;
325020131375Smrg
325120131375Smrg	if (ctx == NULL)
325220131375Smrg		return -EINVAL;
325320131375Smrg
325420131375Smrg	memset(&stats, 0, sizeof(stats));
325520131375Smrg
325620131375Smrg	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
325720131375Smrg	stats.ctx_id = ctx->ctx_id;
325820131375Smrg	ret = drmIoctl(bufmgr_gem->fd,
325920131375Smrg		       DRM_IOCTL_I915_GET_RESET_STATS,
326020131375Smrg		       &stats);
326120131375Smrg	if (ret == 0) {
326220131375Smrg		if (reset_count != NULL)
326320131375Smrg			*reset_count = stats.reset_count;
326420131375Smrg
326520131375Smrg		if (active != NULL)
326620131375Smrg			*active = stats.batch_active;
326720131375Smrg
326820131375Smrg		if (pending != NULL)
326920131375Smrg			*pending = stats.batch_pending;
327020131375Smrg	}
327120131375Smrg
327220131375Smrg	return ret;
327320131375Smrg}
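/*
 * Usage sketch (illustrative only): GL robustness implementations poll this
 * after a suspected hang to find out whether their own context was involved:
 *
 *	uint32_t resets, active, pending;
 *
 *	if (drm_intel_get_reset_stats(ctx, &resets, &active, &pending) == 0 &&
 *	    (active || pending)) {
 *		// batches from this context were caught up in a GPU reset
 *	}
 */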
327420131375Smrg
3275a884aba1Smrgdrm_public int
327620131375Smrgdrm_intel_reg_read(drm_intel_bufmgr *bufmgr,
327720131375Smrg		   uint32_t offset,
327820131375Smrg		   uint64_t *result)
327920131375Smrg{
328020131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
328120131375Smrg	struct drm_i915_reg_read reg_read;
328220131375Smrg	int ret;
328320131375Smrg
328420131375Smrg	VG_CLEAR(reg_read);
328520131375Smrg	reg_read.offset = offset;
328620131375Smrg
328720131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
328820131375Smrg
328920131375Smrg	*result = reg_read.val;
329020131375Smrg	return ret;
329120131375Smrg}
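/*
 * Usage sketch (illustrative only): the kernel only whitelists a handful of
 * registers for this ioctl.  The render ring timestamp is the common case;
 * the 0x2358 offset below is an assumption for recent kernels, not something
 * this file guarantees:
 *
 *	uint64_t ts;
 *
 *	if (drm_intel_reg_read(bufmgr, 0x2358, &ts) == 0)
 *		printf("GPU timestamp: %llu\n", (unsigned long long)ts);
 */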
329220131375Smrg
329320131375Smrg
329420131375Smrg/**
329520131375Smrg * Annotate the given bo for use in aub dumping.
329620131375Smrg *
329720131375Smrg * \param annotations is an array of drm_intel_aub_annotation objects
329820131375Smrg * describing the type of data in various sections of the bo.  Each
329920131375Smrg * element of the array specifies the type and subtype of a section of
330020131375Smrg * the bo, and the past-the-end offset of that section.  The elements
330120131375Smrg * of \c annotations must be sorted so that ending_offset is
330220131375Smrg * increasing.
330320131375Smrg *
330420131375Smrg * \param count is the number of elements in the \c annotations array.
330520131375Smrg * If \c count is zero, then \c annotations will not be dereferenced.
330620131375Smrg *
330720131375Smrg * Annotations are copied into a private data structure, so caller may
330820131375Smrg * re-use the memory pointed to by \c annotations after the call
330920131375Smrg * returns.
331020131375Smrg *
331120131375Smrg * Annotations are stored for the lifetime of the bo; to reset to the
331220131375Smrg * default state (no annotations), call this function with a \c count
331320131375Smrg * of zero.
331420131375Smrg */
3315a884aba1Smrgdrm_public void
331620131375Smrgdrm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
331720131375Smrg					 drm_intel_aub_annotation *annotations,
331820131375Smrg					 unsigned count)
331920131375Smrg{
332020131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
332120131375Smrg	unsigned size = sizeof(*annotations) * count;
332220131375Smrg	drm_intel_aub_annotation *new_annotations =
332320131375Smrg		count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
332420131375Smrg	if (new_annotations == NULL) {
332520131375Smrg		free(bo_gem->aub_annotations);
332620131375Smrg		bo_gem->aub_annotations = NULL;
332720131375Smrg		bo_gem->aub_annotation_count = 0;
332820131375Smrg		return;
332920131375Smrg	}
333020131375Smrg	memcpy(new_annotations, annotations, size);
333120131375Smrg	bo_gem->aub_annotations = new_annotations;
333220131375Smrg	bo_gem->aub_annotation_count = count;
333320131375Smrg}
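/*
 * Usage sketch (illustrative only): annotating a batch buffer so the AUB
 * trace labels the command section and the trailing data separately.  The
 * initializer order follows the drm_intel_aub_annotation definition in
 * intel_bufmgr.h (type, subtype, ending_offset); used_bytes is a placeholder
 * for the caller's batch length:
 *
 *	drm_intel_aub_annotation notes[] = {
 *		{ AUB_TRACE_TYPE_BATCH, 0, used_bytes },
 *		{ AUB_TRACE_TYPE_NOTYPE, 0, bo->size },
 *	};
 *
 *	drm_intel_bufmgr_gem_set_aub_annotations(bo, notes, 2);
 */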
333420131375Smrg
3335a884aba1Smrgstatic pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3336a884aba1Smrgstatic drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3337a884aba1Smrg
3338a884aba1Smrgstatic drm_intel_bufmgr_gem *
3339a884aba1Smrgdrm_intel_bufmgr_gem_find(int fd)
3340a884aba1Smrg{
3341a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
3342a884aba1Smrg
3343a884aba1Smrg	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3344a884aba1Smrg		if (bufmgr_gem->fd == fd) {
3345a884aba1Smrg			atomic_inc(&bufmgr_gem->refcount);
3346a884aba1Smrg			return bufmgr_gem;
3347a884aba1Smrg		}
3348a884aba1Smrg	}
3349a884aba1Smrg
3350a884aba1Smrg	return NULL;
3351a884aba1Smrg}
3352a884aba1Smrg
3353a884aba1Smrgstatic void
3354a884aba1Smrgdrm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3355a884aba1Smrg{
3356a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3357a884aba1Smrg
3358a884aba1Smrg	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3359a884aba1Smrg		pthread_mutex_lock(&bufmgr_list_mutex);
3360a884aba1Smrg
3361a884aba1Smrg		if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3362a884aba1Smrg			DRMLISTDEL(&bufmgr_gem->managers);
3363a884aba1Smrg			drm_intel_bufmgr_gem_destroy(bufmgr);
3364a884aba1Smrg		}
3365a884aba1Smrg
3366a884aba1Smrg		pthread_mutex_unlock(&bufmgr_list_mutex);
3367a884aba1Smrg	}
3368a884aba1Smrg}
3369a884aba1Smrg
3370a884aba1Smrgstatic bool
3371a884aba1Smrghas_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
3372a884aba1Smrg{
3373a884aba1Smrg	int ret;
3374a884aba1Smrg	void *ptr;
3375a884aba1Smrg	long pgsz;
3376a884aba1Smrg	struct drm_i915_gem_userptr userptr;
3377a884aba1Smrg	struct drm_gem_close close_bo;
3378a884aba1Smrg
3379a884aba1Smrg	pgsz = sysconf(_SC_PAGESIZE);
3380a884aba1Smrg	assert(pgsz > 0);
3381a884aba1Smrg
3382a884aba1Smrg	ret = posix_memalign(&ptr, pgsz, pgsz);
3383a884aba1Smrg	if (ret) {
3384a884aba1Smrg		DBG("Failed to get a page (%ld) for userptr detection!\n",
3385a884aba1Smrg			pgsz);
3386a884aba1Smrg		return false;
3387a884aba1Smrg	}
3388a884aba1Smrg
3389a884aba1Smrg	memset(&userptr, 0, sizeof(userptr));
3390a884aba1Smrg	userptr.user_ptr = (__u64)(unsigned long)ptr;
3391a884aba1Smrg	userptr.user_size = pgsz;
3392a884aba1Smrg
3393a884aba1Smrgretry:
3394a884aba1Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
3395a884aba1Smrg	if (ret) {
3396a884aba1Smrg		if (errno == ENODEV && userptr.flags == 0) {
3397a884aba1Smrg			userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
3398a884aba1Smrg			goto retry;
3399a884aba1Smrg		}
3400a884aba1Smrg		free(ptr);
3401a884aba1Smrg		return false;
3402a884aba1Smrg	}
3403a884aba1Smrg
3404a884aba1Smrg	close_bo.handle = userptr.handle;
3405a884aba1Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
3406a884aba1Smrg	free(ptr);
3407a884aba1Smrg	if (ret) {
3408a884aba1Smrg		fprintf(stderr, "Failed to release test userptr object! (%d) "
3409a884aba1Smrg				"i915 kernel driver may not be sane!\n", errno);
3410a884aba1Smrg		return false;
3411a884aba1Smrg	}
3412a884aba1Smrg
3413a884aba1Smrg	return true;
3414a884aba1Smrg}
3415a884aba1Smrg
341622944501Smrg/**
341722944501Smrg * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
341822944501Smrg * and manage buffer objects.
341922944501Smrg *
342022944501Smrg * \param fd File descriptor of the opened DRM device.
342122944501Smrg */
3422a884aba1Smrgdrm_public drm_intel_bufmgr *
342322944501Smrgdrm_intel_bufmgr_gem_init(int fd, int batch_size)
342422944501Smrg{
342522944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
342622944501Smrg	struct drm_i915_gem_get_aperture aperture;
342722944501Smrg	drm_i915_getparam_t gp;
342820131375Smrg	int ret, tmp;
342920131375Smrg	bool exec2 = false;
343022944501Smrg
3431a884aba1Smrg	pthread_mutex_lock(&bufmgr_list_mutex);
3432a884aba1Smrg
3433a884aba1Smrg	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3434a884aba1Smrg	if (bufmgr_gem)
3435a884aba1Smrg		goto exit;
3436a884aba1Smrg
343722944501Smrg	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
343822944501Smrg	if (bufmgr_gem == NULL)
3439a884aba1Smrg		goto exit;
344022944501Smrg
344122944501Smrg	bufmgr_gem->fd = fd;
3442a884aba1Smrg	atomic_set(&bufmgr_gem->refcount, 1);
344322944501Smrg
344422944501Smrg	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
344522944501Smrg		free(bufmgr_gem);
3446a884aba1Smrg		bufmgr_gem = NULL;
3447a884aba1Smrg		goto exit;
344822944501Smrg	}
344922944501Smrg
34506d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
34516d98c517Smrg		       DRM_IOCTL_I915_GEM_GET_APERTURE,
34526d98c517Smrg		       &aperture);
345322944501Smrg
345422944501Smrg	if (ret == 0)
345522944501Smrg		bufmgr_gem->gtt_size = aperture.aper_available_size;
345622944501Smrg	else {
345722944501Smrg		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
345822944501Smrg			strerror(errno));
345922944501Smrg		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
346022944501Smrg		fprintf(stderr, "Assuming %dkB available aperture size.\n"
346122944501Smrg			"May lead to reduced performance or incorrect "
346222944501Smrg			"rendering.\n",
346322944501Smrg			(int)bufmgr_gem->gtt_size / 1024);
346422944501Smrg	}
346522944501Smrg
346620131375Smrg	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
346722944501Smrg
346820131375Smrg	if (IS_GEN2(bufmgr_gem->pci_device))
346922944501Smrg		bufmgr_gem->gen = 2;
347020131375Smrg	else if (IS_GEN3(bufmgr_gem->pci_device))
347122944501Smrg		bufmgr_gem->gen = 3;
347220131375Smrg	else if (IS_GEN4(bufmgr_gem->pci_device))
347322944501Smrg		bufmgr_gem->gen = 4;
347420131375Smrg	else if (IS_GEN5(bufmgr_gem->pci_device))
347520131375Smrg		bufmgr_gem->gen = 5;
347620131375Smrg	else if (IS_GEN6(bufmgr_gem->pci_device))
347722944501Smrg		bufmgr_gem->gen = 6;
347820131375Smrg	else if (IS_GEN7(bufmgr_gem->pci_device))
347920131375Smrg		bufmgr_gem->gen = 7;
348020131375Smrg	else if (IS_GEN8(bufmgr_gem->pci_device))
348120131375Smrg		bufmgr_gem->gen = 8;
348220131375Smrg	else {
348320131375Smrg		free(bufmgr_gem);
3484a884aba1Smrg		bufmgr_gem = NULL;
3485a884aba1Smrg		goto exit;
348620131375Smrg	}
348720131375Smrg
348820131375Smrg	if (IS_GEN3(bufmgr_gem->pci_device) &&
348920131375Smrg	    bufmgr_gem->gtt_size > 256*1024*1024) {
349020131375Smrg		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
349120131375Smrg		 * be used for tiled blits. To simplify the accounting, just
349220131375Smrg		 * subtract the unmappable part (fixed to 256MB on all known
349320131375Smrg		 * gen3 devices) if the kernel advertises it. */
349420131375Smrg		bufmgr_gem->gtt_size -= 256*1024*1024;
349520131375Smrg	}
349620131375Smrg
349720131375Smrg	VG_CLEAR(gp);
349820131375Smrg	gp.value = &tmp;
349922944501Smrg
350022944501Smrg	gp.param = I915_PARAM_HAS_EXECBUF2;
35016d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
350222944501Smrg	if (!ret)
350320131375Smrg		exec2 = true;
350422944501Smrg
3505aaba2545Smrg	gp.param = I915_PARAM_HAS_BSD;
35066d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
35079ce4edccSmrg	bufmgr_gem->has_bsd = ret == 0;
35089ce4edccSmrg
35099ce4edccSmrg	gp.param = I915_PARAM_HAS_BLT;
35109ce4edccSmrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
35119ce4edccSmrg	bufmgr_gem->has_blt = ret == 0;
35129ce4edccSmrg
35139ce4edccSmrg	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
35149ce4edccSmrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
35159ce4edccSmrg	bufmgr_gem->has_relaxed_fencing = ret == 0;
3516aaba2545Smrg
3517a884aba1Smrg	if (has_userptr(bufmgr_gem))
3518a884aba1Smrg		bufmgr_gem->bufmgr.bo_alloc_userptr =
3519a884aba1Smrg			drm_intel_gem_bo_alloc_userptr;
3520a884aba1Smrg
352120131375Smrg	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
352220131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
352320131375Smrg	bufmgr_gem->has_wait_timeout = ret == 0;
352420131375Smrg
352520131375Smrg	gp.param = I915_PARAM_HAS_LLC;
352620131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
352720131375Smrg	if (ret != 0) {
352820131375Smrg		/* Kernel does not support the HAS_LLC query; fall back to GPU
352920131375Smrg		 * generation detection and assume that we have LLC on GEN6/7.
353020131375Smrg		 */
353120131375Smrg		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
353220131375Smrg				IS_GEN7(bufmgr_gem->pci_device));
353320131375Smrg	} else
353420131375Smrg		bufmgr_gem->has_llc = *gp.value;
353520131375Smrg
353620131375Smrg	gp.param = I915_PARAM_HAS_VEBOX;
353720131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
353820131375Smrg	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
353920131375Smrg
354022944501Smrg	if (bufmgr_gem->gen < 4) {
354122944501Smrg		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
354222944501Smrg		gp.value = &bufmgr_gem->available_fences;
35436d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
354422944501Smrg		if (ret) {
354522944501Smrg			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
354622944501Smrg				errno);
354722944501Smrg			fprintf(stderr, "param: %d, val: %d\n", gp.param,
354822944501Smrg				*gp.value);
354922944501Smrg			bufmgr_gem->available_fences = 0;
355022944501Smrg		} else {
355122944501Smrg			/* XXX The kernel reports the total number of fences,
355222944501Smrg			 * including any that may be pinned.
355322944501Smrg			 *
355422944501Smrg			 * We presume that there will be at least one pinned
355522944501Smrg			 * fence for the scanout buffer, but there may be more
355622944501Smrg			 * than one scanout and the user may be manually
355722944501Smrg			 * pinning buffers. Let's move to execbuffer2 and
355822944501Smrg			 * thereby forget the insanity of using fences...
355922944501Smrg			 */
356022944501Smrg			bufmgr_gem->available_fences -= 2;
356122944501Smrg			if (bufmgr_gem->available_fences < 0)
356222944501Smrg				bufmgr_gem->available_fences = 0;
356322944501Smrg		}
356422944501Smrg	}
356522944501Smrg
356622944501Smrg	/* Let's go with one relocation per every 2 dwords (but round down a bit
356722944501Smrg	 * since a power of two will mean an extra page allocation for the reloc
356822944501Smrg	 * buffer).
356922944501Smrg	 *
357022944501Smrg	 * Every 4 was too few for the blender benchmark.
357122944501Smrg	 */
357222944501Smrg	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
357322944501Smrg
357422944501Smrg	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
357522944501Smrg	bufmgr_gem->bufmgr.bo_alloc_for_render =
357622944501Smrg	    drm_intel_gem_bo_alloc_for_render;
357722944501Smrg	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
357822944501Smrg	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
357922944501Smrg	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
358022944501Smrg	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
358122944501Smrg	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
358222944501Smrg	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
358322944501Smrg	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
358422944501Smrg	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
358522944501Smrg	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
358622944501Smrg	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
358722944501Smrg	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
358822944501Smrg	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
358922944501Smrg	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
359022944501Smrg	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
359122944501Smrg	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
359222944501Smrg	/* Use the new one if available */
3593aaba2545Smrg	if (exec2) {
359422944501Smrg		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
35959ce4edccSmrg		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3596aaba2545Smrg	} else
359722944501Smrg		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
359822944501Smrg	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
359922944501Smrg	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3600a884aba1Smrg	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
360122944501Smrg	bufmgr_gem->bufmgr.debug = 0;
360222944501Smrg	bufmgr_gem->bufmgr.check_aperture_space =
360322944501Smrg	    drm_intel_gem_check_aperture_space;
360422944501Smrg	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3605aaba2545Smrg	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
360622944501Smrg	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
360722944501Smrg	    drm_intel_gem_get_pipe_from_crtc_id;
360822944501Smrg	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
360922944501Smrg
361020131375Smrg	DRMINITLISTHEAD(&bufmgr_gem->named);
3611aaba2545Smrg	init_cache_buckets(bufmgr_gem);
361222944501Smrg
361320131375Smrg	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
361420131375Smrg	bufmgr_gem->vma_max = -1; /* unlimited by default */
361520131375Smrg
3616a884aba1Smrg	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3617a884aba1Smrg
3618a884aba1Smrgexit:
3619a884aba1Smrg	pthread_mutex_unlock(&bufmgr_list_mutex);
3620a884aba1Smrg
3621a884aba1Smrg	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
362222944501Smrg}
3623