/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *	    Eric Anholt <eric@anholt.net>
 *	    Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>

#include <errno.h>
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"

#include "i915_drm.h"
#include "uthash.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define memclear(s) memset(&s, 0, sizeof(s))

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define MAX2(A, B) ((A) > (B) ? (A) : (B))

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((__u32)(n))

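/*
 * Worked example (illustrative only, not used by the code): a presumed
 * 64-bit GPU address of 0x123456000 splits as
 *
 *	upper_32_bits(0x123456000ULL) == 0x00000001
 *	lower_32_bits(0x123456000ULL) == 0x23456000
 *
 * which is the "upper lower" pair format the relocation debug output
 * below prints.
 */
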
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	atomic_t refcount;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	drmMMListHead managers;

	drm_intel_bo_gem *name_table;
	drm_intel_bo_gem *handle_table;

	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int has_llc : 1;
	unsigned int has_wait_timeout : 1;
	unsigned int bo_reuse : 1;
	unsigned int no_exec : 1;
	unsigned int has_vebox : 1;
	unsigned int has_exec_async : 1;
	bool fenced_relocs;

	struct {
		void *ptr;
		uint32_t handle;
	} userptr_active;

} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 *
	 * List contains both flink-named and prime fd'd objects.
	 */
	unsigned int global_name;

	UT_hash_handle handle_hh;
	UT_hash_handle name_hh;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	unsigned long kflags;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Array of BOs that are referenced by this buffer and will be softpinned */
	drm_intel_bo **softpin_target;
	/** Number of softpinned BOs referenced by this buffer */
	int softpin_target_count;
	/** Maximum number of softpinned BOs referenced by this buffer */
	int softpin_target_size;

	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	/** WC CPU address for the buffer, saved across map/unmap cycles */
	void *wc_virtual;
	/**
	 * Virtual address of the buffer allocated by user, used for userptr
	 * objects only.
	 */
	void *user_virtual;
	int map_count;
	drmMMListHead vma_list;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building
	 * the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Boolean of whether the GPU is definitely not accessing the buffer.
	 *
	 * This is only valid when reusable, since non-reusable
	 * buffers are those that have been shared with other
	 * processes, so we don't know their state.
	 */
	bool idle;

	/**
	 * Boolean of whether this buffer was allocated with userptr
	 */
	bool is_userptr;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
{
	return (drm_intel_bo_gem *)bo;
}

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need power-of-two sizes of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}

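/*
 * Worked example (illustrative only): on gen3 without relaxed fencing, a
 * tiled request of 1.5MB walks the loop above from min_size = 1MB to 2MB,
 * so the allocation rounds up to the next power of two. With relaxed
 * fencing, or on gen4+, the same request would only be page-aligned.
 */
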
/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fall back to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power-of-two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

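/*
 * Worked example (illustrative only): an X-tiled pitch of 1000 bytes on a
 * pre-965 part starts at tile_width = 512 and doubles to 1024, the first
 * power of two that covers the pitch; on gen4+ the same pitch would
 * simply round up to the next tile-width multiple, also 1024.
 */
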
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
			DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d %s(%s)@0x%08x %08x -> "
			    "%d (%s)@0x%08x %08x + 0x%08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name,
			    upper_32_bits(bo_gem->relocs[j].offset),
			    lower_32_bits(bo_gem->relocs[j].offset),
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64),
			    bo_gem->relocs[j].delta);
		}

		for (j = 0; j < bo_gem->softpin_target_count; j++) {
			drm_intel_bo *target_bo = bo_gem->softpin_target[j];
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;
			DBG("%2d: %d %s(%s) -> "
			    "%d *(%s)@0x%08x %08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name,
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64));
		}
	}
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = bo->align;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;
	unsigned long flags;

	flags = 0;
	if (need_fence)
		flags |= EXEC_OBJECT_NEEDS_FENCE;

	if (bo_gem->validate_index != -1) {
		bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = bo->align;
	bufmgr_gem->exec2_objects[index].offset = bo->offset64;
	bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags | flags;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem,
				      unsigned int alignment)
{
	unsigned int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		unsigned int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		alignment = MAX2(alignment, min_size);
	}

	bo_gem->reloc_tree_size = size + alignment;
}

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free(bo_gem->relocs);
		bo_gem->relocs = NULL;

		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	if (bo_gem->reusable && bo_gem->idle)
		return false;

	memclear(busy);
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	if (ret == 0) {
		bo_gem->idle = !busy.busy;
		return busy.busy;
	} else {
		return false;
	}
}

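/*
 * Usage sketch (illustrative only): callers typically poll busy-ness
 * through the public drm_intel_bo_busy() wrapper to decide between
 * reusing a buffer now and allocating a fresh one, e.g.
 *
 *	if (!drm_intel_bo_busy(bo))
 *		... map and overwrite bo without stalling on the GPU ...
 *
 * The cached "idle" flag above only short-circuits the ioctl for
 * reusable (never-shared) buffers.
 */
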
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	memclear(madv);
	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

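/*
 * Usage sketch (illustrative only): the cache below marks idle buffers
 * purgeable and revives them on reuse:
 *
 *	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);	   kernel may reclaim
 *	...
 *	if (!drm_intel_bo_madvise(bo, I915_MADV_WILLNEED))
 *		... pages were purged; treat bo contents as lost ...
 *
 * A zero "retained" return means the kernel already discarded the
 * backing pages.
 */
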
/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride,
				unsigned int alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
			bo_gem->bo.align = alignment;
		} else {
			assert(alignment == 0);
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			goto err;

		/* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
		   list (vma_list), so set the list head here first */
		DRMINITLISTHEAD(&bo_gem->vma_list);

		bo_gem->bo.size = bo_size;

		memclear(create);
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		if (ret != 0) {
			free(bo_gem);
			goto err;
		}

		bo_gem->gem_handle = create.handle;
		HASH_ADD(handle_hh, bufmgr_gem->handle_table,
			 gem_handle, sizeof(bo_gem->gem_handle),
			 bo_gem);

		bo_gem->bo.handle = bo_gem->gem_handle;
		bo_gem->bo.bufmgr = bufmgr;
		bo_gem->bo.align = alignment;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride))
			goto err_free;
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;

err_free:
	drm_intel_gem_bo_free(&bo_gem->bo);
err:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0,
					       alignment);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8- or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault.  This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride, 0);
}

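/*
 * Usage sketch (illustrative only): allocating an X-tiled 1024x768 XRGB
 * surface through the public wrapper. tiling_mode is in/out: the chosen
 * (possibly downgraded) mode and the final pitch come back to the caller.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo =
 *		drm_intel_bo_alloc_tiled(bufmgr, "scanout", 1024, 768, 4,
 *					 &tiling, &pitch, 0);
 */
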
static drm_intel_bo *
drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
				const char *name,
				void *addr,
				uint32_t tiling_mode,
				uint32_t stride,
				unsigned long size,
				unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_i915_gem_userptr userptr;

	/* Tiling with userptr surfaces is not supported
	 * on all hardware so refuse it for the time being.
	 */
	if (tiling_mode != I915_TILING_NONE)
		return NULL;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	bo_gem->bo.size = size;

	memclear(userptr);
	userptr.user_ptr = (__u64)((unsigned long)addr);
	userptr.user_size = size;
	userptr.flags = flags;

	ret = drmIoctl(bufmgr_gem->fd,
			DRM_IOCTL_I915_GEM_USERPTR,
			&userptr);
	if (ret != 0) {
		DBG("bo_create_userptr: "
		    "ioctl failed with user ptr %p size 0x%lx, "
		    "user flags 0x%lx\n", addr, size, flags);
		free(bo_gem);
		return NULL;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);

	bo_gem->gem_handle = userptr.handle;
	bo_gem->bo.handle = bo_gem->gem_handle;
	bo_gem->bo.bufmgr    = bufmgr;
	bo_gem->is_userptr   = true;
	bo_gem->bo.virtual   = addr;
	/* Save the address provided by user */
	bo_gem->user_virtual = addr;
	bo_gem->tiling_mode  = I915_TILING_NONE;
	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	bo_gem->stride       = 0;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle),
		 bo_gem);

	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create_userptr: "
	    "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
		addr, bo_gem->gem_handle, bo_gem->name,
		size, stride, tiling_mode);

	return &bo_gem->bo;
}

static bool
has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int ret;
	void *ptr;
	long pgsz;
	struct drm_i915_gem_userptr userptr;

	pgsz = sysconf(_SC_PAGESIZE);
	assert(pgsz > 0);

	ret = posix_memalign(&ptr, pgsz, pgsz);
	if (ret) {
		DBG("Failed to get a page (%ld) for userptr detection!\n",
			pgsz);
		return false;
	}

	memclear(userptr);
	userptr.user_ptr = (__u64)(unsigned long)ptr;
	userptr.user_size = pgsz;

retry:
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
	if (ret) {
		if (errno == ENODEV && userptr.flags == 0) {
			userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
			goto retry;
		}
		free(ptr);
		return false;
	}

	/* We don't release the userptr bo here as we want to keep the
	 * kernel mm tracking alive for our lifetime. The first time we
	 * create a userptr object the kernel has to install an mmu_notifier,
	 * which is a heavyweight operation (e.g. it requires taking all
	 * mm_locks and stop_machine()).
	 */

	bufmgr_gem->userptr_active.ptr = ptr;
	bufmgr_gem->userptr_active.handle = userptr.handle;

	return true;
}

static drm_intel_bo *
check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       void *addr,
		       uint32_t tiling_mode,
		       uint32_t stride,
		       unsigned long size,
		       unsigned long flags)
{
	if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
		bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
	else
		bufmgr->bo_alloc_userptr = NULL;

	return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
					  tiling_mode, stride, size, flags);
}

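/*
 * Usage sketch (illustrative only): wrapping page-aligned client memory
 * as a bo via the public entry point. The address and size must be
 * page-aligned or the USERPTR ioctl rejects them.
 *
 *	void *mem;
 *	posix_memalign(&mem, 4096, 65536);
 *	drm_intel_bo *bo =
 *		drm_intel_bo_alloc_userptr(bufmgr, "wrapped", mem,
 *					   I915_TILING_NONE, 0, 65536, 0);
 */
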
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;

	/* At the moment most applications only have a few named bos.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named. And since X returns the
	 * alternating names for the front/back buffer, the name lookup
	 * below is cheap.
	 */
	pthread_mutex_lock(&bufmgr_gem->lock);
	HASH_FIND(name_hh, bufmgr_gem->name_table,
		  &handle, sizeof(handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	memclear(open_arg);
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		bo_gem = NULL;
		goto out;
	}
	/* Now see if someone has used a prime handle to get this
	 * object from the kernel before by looking through the list
	 * again for a matching gem_handle
	 */
	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
		  &open_arg.handle, sizeof(open_arg.handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		goto out;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.offset64 = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
	HASH_ADD(name_hh, bufmgr_gem->name_table,
		 global_name, sizeof(bo_gem->global_name), bo_gem);

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0)
		goto err_unref;

	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

out:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return &bo_gem->bo;

err_unref:
	drm_intel_gem_bo_free(&bo_gem->bo);
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}

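/*
 * Usage sketch (illustrative only): process A publishes a bo by flink
 * name, process B opens it with the function above.
 *
 *	process A:
 *		uint32_t name;
 *		drm_intel_bo_flink(bo, &name);	share "name" out of band
 *
 *	process B:
 *		drm_intel_bo *shared =
 *			drm_intel_bo_gem_create_from_name(bufmgr, "shared",
 *							  name);
 */
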
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->wc_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
		drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	if (bo_gem->global_name)
		HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
	HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);

	/* Close this object */
	memclear(close);
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

static void
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
{
#if HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

	if (bo_gem->wc_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);

	if (bo_gem->gtt_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}

static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bufmgr_gem->vma_cache.next,
				      vma_list);
		assert(bo_gem->map_count == 0);
		DRMLISTDELINIT(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
			drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->wc_virtual) {
			drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
			bo_gem->wc_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}

static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				       drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	for (i = 0; i < bo_gem->softpin_target_count; i++)
		drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
							  time);
	bo_gem->kflags = 0;
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->softpin_target_count = 0;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}
	if (bo_gem->softpin_target) {
		free(bo_gem->softpin_target);
		bo_gem->softpin_target = NULL;
		bo_gem->softpin_target_size = 0;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	}

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

140022944501Smrgstatic void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
140122944501Smrg						      time_t time)
140222944501Smrg{
140322944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
140422944501Smrg
140522944501Smrg	assert(atomic_read(&bo_gem->refcount) > 0);
140622944501Smrg	if (atomic_dec_and_test(&bo_gem->refcount))
140722944501Smrg		drm_intel_gem_bo_unreference_final(bo, time);
140822944501Smrg}
140922944501Smrg
141022944501Smrgstatic void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
141122944501Smrg{
141222944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
141322944501Smrg
141422944501Smrg	assert(atomic_read(&bo_gem->refcount) > 0);
1415a884aba1Smrg
1416a884aba1Smrg	if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
141722944501Smrg		drm_intel_bufmgr_gem *bufmgr_gem =
141822944501Smrg		    (drm_intel_bufmgr_gem *) bo->bufmgr;
141922944501Smrg		struct timespec time;
142022944501Smrg
142122944501Smrg		clock_gettime(CLOCK_MONOTONIC, &time);
142222944501Smrg
142322944501Smrg		pthread_mutex_lock(&bufmgr_gem->lock);
1424a884aba1Smrg
1425a884aba1Smrg		if (atomic_dec_and_test(&bo_gem->refcount)) {
1426a884aba1Smrg			drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1427a884aba1Smrg			drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1428a884aba1Smrg		}
1429a884aba1Smrg
143022944501Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
143122944501Smrg	}
143222944501Smrg}
143322944501Smrg
143422944501Smrgstatic int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
143522944501Smrg{
143622944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
143722944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
143822944501Smrg	struct drm_i915_gem_set_domain set_domain;
143922944501Smrg	int ret;
144022944501Smrg
1441a884aba1Smrg	if (bo_gem->is_userptr) {
1442a884aba1Smrg		/* Return the same user ptr */
1443a884aba1Smrg		bo->virtual = bo_gem->user_virtual;
1444a884aba1Smrg		return 0;
1445a884aba1Smrg	}
1446a884aba1Smrg
144722944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
144822944501Smrg
144920131375Smrg	if (bo_gem->map_count++ == 0)
145020131375Smrg		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
145120131375Smrg
145222944501Smrg	if (!bo_gem->mem_virtual) {
145322944501Smrg		struct drm_i915_gem_mmap mmap_arg;
145422944501Smrg
145520131375Smrg		DBG("bo_map: %d (%s), map_count=%d\n",
145620131375Smrg		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
145722944501Smrg
1458424e9256Smrg		memclear(mmap_arg);
145922944501Smrg		mmap_arg.handle = bo_gem->gem_handle;
146022944501Smrg		mmap_arg.size = bo->size;
14616d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd,
14626d98c517Smrg			       DRM_IOCTL_I915_GEM_MMAP,
14636d98c517Smrg			       &mmap_arg);
146422944501Smrg		if (ret != 0) {
146522944501Smrg			ret = -errno;
14669ce4edccSmrg			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
14679ce4edccSmrg			    __FILE__, __LINE__, bo_gem->gem_handle,
14689ce4edccSmrg			    bo_gem->name, strerror(errno));
146920131375Smrg			if (--bo_gem->map_count == 0)
147020131375Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
147122944501Smrg			pthread_mutex_unlock(&bufmgr_gem->lock);
147222944501Smrg			return ret;
147322944501Smrg		}
147420131375Smrg		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
147522944501Smrg		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
147622944501Smrg	}
147722944501Smrg	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
147822944501Smrg	    bo_gem->mem_virtual);
147922944501Smrg	bo->virtual = bo_gem->mem_virtual;
148022944501Smrg
1481424e9256Smrg	memclear(set_domain);
148222944501Smrg	set_domain.handle = bo_gem->gem_handle;
148322944501Smrg	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
148422944501Smrg	if (write_enable)
148522944501Smrg		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
148622944501Smrg	else
148722944501Smrg		set_domain.write_domain = 0;
14886d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
14896d98c517Smrg		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
14906d98c517Smrg		       &set_domain);
149122944501Smrg	if (ret != 0) {
14929ce4edccSmrg		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
14939ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle,
14949ce4edccSmrg		    strerror(errno));
149522944501Smrg	}
149622944501Smrg
149720131375Smrg	if (write_enable)
149820131375Smrg		bo_gem->mapped_cpu_write = true;
149920131375Smrg
150020131375Smrg	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
150120131375Smrg	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
150222944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
150322944501Smrg
150422944501Smrg	return 0;
150522944501Smrg}
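
/*
 * Example: a minimal CPU-mapping sketch (illustrative only; "data" and
 * "data_size" are hypothetical caller-supplied values).  drm_intel_bo_map()
 * is the public wrapper that dispatches to drm_intel_gem_bo_map() above;
 * passing write_enable = 1 moves the BO to the CPU write domain first.
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memcpy(bo->virtual, data, data_size);
 *		drm_intel_bo_unmap(bo);
 *	}
 */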
150622944501Smrg
150720131375Smrgstatic int
150820131375Smrgmap_gtt(drm_intel_bo *bo)
150922944501Smrg{
151022944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
151122944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
151222944501Smrg	int ret;
151322944501Smrg
1514a884aba1Smrg	if (bo_gem->is_userptr)
1515a884aba1Smrg		return -EINVAL;
1516a884aba1Smrg
151720131375Smrg	if (bo_gem->map_count++ == 0)
151820131375Smrg		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
151922944501Smrg
152022944501Smrg	/* Get a mapping of the buffer if we haven't before. */
152122944501Smrg	if (bo_gem->gtt_virtual == NULL) {
152222944501Smrg		struct drm_i915_gem_mmap_gtt mmap_arg;
152322944501Smrg
152420131375Smrg		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
152520131375Smrg		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
152622944501Smrg
1527424e9256Smrg		memclear(mmap_arg);
152822944501Smrg		mmap_arg.handle = bo_gem->gem_handle;
152922944501Smrg
153022944501Smrg		/* Get the fake offset back... */
15316d98c517Smrg		ret = drmIoctl(bufmgr_gem->fd,
15326d98c517Smrg			       DRM_IOCTL_I915_GEM_MMAP_GTT,
15336d98c517Smrg			       &mmap_arg);
153422944501Smrg		if (ret != 0) {
153522944501Smrg			ret = -errno;
15369ce4edccSmrg			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
15379ce4edccSmrg			    __FILE__, __LINE__,
15389ce4edccSmrg			    bo_gem->gem_handle, bo_gem->name,
15399ce4edccSmrg			    strerror(errno));
154020131375Smrg			if (--bo_gem->map_count == 0)
154120131375Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
154222944501Smrg			return ret;
154322944501Smrg		}
154422944501Smrg
154522944501Smrg		/* and mmap it */
1546aec75c42Sriastradh		ret = drmMap(bufmgr_gem->fd, mmap_arg.offset, bo->size,
1547aec75c42Sriastradh		    &bo_gem->gtt_virtual);
1548aec75c42Sriastradh		if (ret) {
154922944501Smrg			bo_gem->gtt_virtual = NULL;
15509ce4edccSmrg			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
15519ce4edccSmrg			    __FILE__, __LINE__,
15529ce4edccSmrg			    bo_gem->gem_handle, bo_gem->name,
15539ce4edccSmrg			    strerror(errno));
155420131375Smrg			if (--bo_gem->map_count == 0)
155520131375Smrg				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
155622944501Smrg			return ret;
155722944501Smrg		}
155822944501Smrg	}
155922944501Smrg
156022944501Smrg	bo->virtual = bo_gem->gtt_virtual;
156122944501Smrg
156222944501Smrg	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
156322944501Smrg	    bo_gem->gtt_virtual);
156422944501Smrg
156520131375Smrg	return 0;
156620131375Smrg}
156720131375Smrg
1568424e9256Smrgint
1569a884aba1Smrgdrm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
157020131375Smrg{
157120131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
157220131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
157320131375Smrg	struct drm_i915_gem_set_domain set_domain;
157420131375Smrg	int ret;
157520131375Smrg
157620131375Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
157720131375Smrg
157820131375Smrg	ret = map_gtt(bo);
157920131375Smrg	if (ret) {
158020131375Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
158120131375Smrg		return ret;
158220131375Smrg	}
158320131375Smrg
158420131375Smrg	/* Now move it to the GTT domain so that the GPU and CPU
158520131375Smrg	 * caches are flushed and the GPU isn't actively using the
158620131375Smrg	 * buffer.
158720131375Smrg	 *
158820131375Smrg	 * The pagefault handler does this domain change for us when
158920131375Smrg	 * it has unbound the BO from the GTT, but it's up to us to
159020131375Smrg	 * tell it when we're about to use things if we had done
159120131375Smrg	 * rendering and it still happens to be bound to the GTT.
159220131375Smrg	 */
1593424e9256Smrg	memclear(set_domain);
159422944501Smrg	set_domain.handle = bo_gem->gem_handle;
159522944501Smrg	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
159622944501Smrg	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
15976d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
15986d98c517Smrg		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
15996d98c517Smrg		       &set_domain);
160022944501Smrg	if (ret != 0) {
16019ce4edccSmrg		DBG("%s:%d: Error setting domain %d: %s\n",
16029ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle,
16039ce4edccSmrg		    strerror(errno));
160422944501Smrg	}
160522944501Smrg
160620131375Smrg	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
160720131375Smrg	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
160822944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
160922944501Smrg
16106d98c517Smrg	return 0;
161122944501Smrg}
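
/*
 * Example: a GTT-mapping sketch.  Unlike the CPU mmap above, accesses
 * through the GTT go via a fence register, so a tiled buffer can be
 * addressed as if it were linear.  The store below is illustrative.
 *
 *	if (drm_intel_gem_bo_map_gtt(bo) == 0) {
 *		((uint32_t *)bo->virtual)[0] = 0;
 *		drm_intel_gem_bo_unmap_gtt(bo);
 *	}
 */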
161222944501Smrg
161320131375Smrg/**
161420131375Smrg * Performs a mapping of the buffer object like the normal GTT
161520131375Smrg * mapping, but avoids waiting for the GPU to be done reading from or
161620131375Smrg * rendering to the buffer.
161720131375Smrg *
161820131375Smrg * This is used in the implementation of GL_ARB_map_buffer_range: The
161920131375Smrg * user asks to create a buffer, then does a mapping, fills some
162020131375Smrg * space, runs a drawing command, then asks to map it again without
162120131375Smrg * synchronizing because it guarantees that it won't write over the
162220131375Smrg * data that the GPU is busy using (or, more specifically, that if it
162320131375Smrg * does write over the data, it acknowledges that rendering is
162420131375Smrg * undefined).
162520131375Smrg */
162620131375Smrg
1627424e9256Smrgint
1628a884aba1Smrgdrm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
162922944501Smrg{
163022944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
163120131375Smrg#ifdef HAVE_VALGRIND
163220131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
163320131375Smrg#endif
163420131375Smrg	int ret;
163522944501Smrg
163620131375Smrg	/* If the CPU cache isn't coherent with the GTT, then use a
163720131375Smrg	 * regular synchronized mapping.  The problem is that we don't
163820131375Smrg	 * track where the buffer was last used on the CPU side in
163920131375Smrg	 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
164020131375Smrg	 * we would potentially corrupt the buffer even when the user
164120131375Smrg	 * does reasonable things.
164220131375Smrg	 */
164320131375Smrg	if (!bufmgr_gem->has_llc)
164420131375Smrg		return drm_intel_gem_bo_map_gtt(bo);
164522944501Smrg
164622944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
164720131375Smrg
164820131375Smrg	ret = map_gtt(bo);
164920131375Smrg	if (ret == 0) {
165020131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
165120131375Smrg		VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
165220131375Smrg	}
165320131375Smrg
165422944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
165522944501Smrg
165622944501Smrg	return ret;
165722944501Smrg}
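
/*
 * Example: a streaming-upload sketch in the spirit of the comment above.
 * The caller promises it only writes ranges the GPU is not currently
 * using; "stream_offset", "vertices" and "bytes" are hypothetical.
 *
 *	drm_intel_gem_bo_map_unsynchronized(bo);
 *	memcpy((char *)bo->virtual + stream_offset, vertices, bytes);
 *	drm_intel_bo_unmap(bo);
 */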
165822944501Smrg
165922944501Smrgstatic int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
166022944501Smrg{
1661a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem;
166222944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
166320131375Smrg	int ret = 0;
166422944501Smrg
166522944501Smrg	if (bo == NULL)
166622944501Smrg		return 0;
166722944501Smrg
1668a884aba1Smrg	if (bo_gem->is_userptr)
1669a884aba1Smrg		return 0;
1670a884aba1Smrg
1671a884aba1Smrg	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1672a884aba1Smrg
167322944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
167422944501Smrg
167520131375Smrg	if (bo_gem->map_count <= 0) {
167620131375Smrg		DBG("attempted to unmap an unmapped bo\n");
167720131375Smrg		pthread_mutex_unlock(&bufmgr_gem->lock);
167820131375Smrg		/* Preserve the old behaviour of just treating this as a
167920131375Smrg		 * no-op rather than reporting the error.
168020131375Smrg		 */
168120131375Smrg		return 0;
168220131375Smrg	}
168320131375Smrg
168420131375Smrg	if (bo_gem->mapped_cpu_write) {
168520131375Smrg		struct drm_i915_gem_sw_finish sw_finish;
168620131375Smrg
168720131375Smrg		/* Cause a flush to happen if the buffer's pinned for
168820131375Smrg		 * scanout, so the results show up in a timely manner.
168920131375Smrg		 * Unlike GTT set domains, this only does work if the
169020131375Smrg		 * buffer should be scanout-related.
169120131375Smrg		 * buffer is scanout-related.
		 */
1692424e9256Smrg		memclear(sw_finish);
169320131375Smrg		sw_finish.handle = bo_gem->gem_handle;
169420131375Smrg		ret = drmIoctl(bufmgr_gem->fd,
169520131375Smrg			       DRM_IOCTL_I915_GEM_SW_FINISH,
169620131375Smrg			       &sw_finish);
169720131375Smrg		ret = ret == -1 ? -errno : 0;
169820131375Smrg
169920131375Smrg		bo_gem->mapped_cpu_write = false;
170020131375Smrg	}
170122944501Smrg
170220131375Smrg	/* We need to unmap after every invocation as we cannot track
17032ee35494Smrg	 * an open vma for every bo as that will exhaust the system
170420131375Smrg	 * limits and cause later failures.
170520131375Smrg	 */
170620131375Smrg	if (--bo_gem->map_count == 0) {
170720131375Smrg		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
170820131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
170920131375Smrg		bo->virtual = NULL;
171020131375Smrg	}
171122944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
171222944501Smrg
171322944501Smrg	return ret;
171422944501Smrg}
171522944501Smrg
1716424e9256Smrgint
1717a884aba1Smrgdrm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
171820131375Smrg{
171920131375Smrg	return drm_intel_gem_bo_unmap(bo);
172020131375Smrg}
172120131375Smrg
172222944501Smrgstatic int
172322944501Smrgdrm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
172422944501Smrg			 unsigned long size, const void *data)
172522944501Smrg{
172622944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
172722944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
172822944501Smrg	struct drm_i915_gem_pwrite pwrite;
172922944501Smrg	int ret;
173022944501Smrg
1731a884aba1Smrg	if (bo_gem->is_userptr)
1732a884aba1Smrg		return -EINVAL;
1733a884aba1Smrg
1734424e9256Smrg	memclear(pwrite);
173522944501Smrg	pwrite.handle = bo_gem->gem_handle;
173622944501Smrg	pwrite.offset = offset;
173722944501Smrg	pwrite.size = size;
173822944501Smrg	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
17396d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
17406d98c517Smrg		       DRM_IOCTL_I915_GEM_PWRITE,
17416d98c517Smrg		       &pwrite);
174222944501Smrg	if (ret != 0) {
174322944501Smrg		ret = -errno;
17449ce4edccSmrg		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
17459ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
17469ce4edccSmrg		    (int)size, strerror(errno));
174722944501Smrg	}
174822944501Smrg
174922944501Smrg	return ret;
175022944501Smrg}
175122944501Smrg
175222944501Smrgstatic int
175322944501Smrgdrm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
175422944501Smrg{
175522944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
175622944501Smrg	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
175722944501Smrg	int ret;
175822944501Smrg
1759424e9256Smrg	memclear(get_pipe_from_crtc_id);
176022944501Smrg	get_pipe_from_crtc_id.crtc_id = crtc_id;
17616d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
17626d98c517Smrg		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
17636d98c517Smrg		       &get_pipe_from_crtc_id);
176422944501Smrg	if (ret != 0) {
176522944501Smrg		/* We return -1 here to signal that we don't
176622944501Smrg		 * know which pipe is associated with this crtc.
176722944501Smrg		 * This lets the caller know that this information
176822944501Smrg		 * isn't available; using the wrong pipe for
176922944501Smrg		 * vblank waiting can cause the chipset to lock up
177022944501Smrg		 */
177122944501Smrg		return -1;
177222944501Smrg	}
177322944501Smrg
177422944501Smrg	return get_pipe_from_crtc_id.pipe;
177522944501Smrg}
177622944501Smrg
177722944501Smrgstatic int
177822944501Smrgdrm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
177922944501Smrg			     unsigned long size, void *data)
178022944501Smrg{
178122944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
178222944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
178322944501Smrg	struct drm_i915_gem_pread pread;
178422944501Smrg	int ret;
178522944501Smrg
1786a884aba1Smrg	if (bo_gem->is_userptr)
1787a884aba1Smrg		return -EINVAL;
1788a884aba1Smrg
1789424e9256Smrg	memclear(pread);
179022944501Smrg	pread.handle = bo_gem->gem_handle;
179122944501Smrg	pread.offset = offset;
179222944501Smrg	pread.size = size;
179322944501Smrg	pread.data_ptr = (uint64_t) (uintptr_t) data;
17946d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
17956d98c517Smrg		       DRM_IOCTL_I915_GEM_PREAD,
17966d98c517Smrg		       &pread);
179722944501Smrg	if (ret != 0) {
179822944501Smrg		ret = -errno;
17999ce4edccSmrg		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
18009ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
18019ce4edccSmrg		    (int)size, strerror(errno));
180222944501Smrg	}
180322944501Smrg
180422944501Smrg	return ret;
180522944501Smrg}
180622944501Smrg
18079ce4edccSmrg/** Waits for all GPU rendering with the object to have completed. */
180822944501Smrgstatic void
180922944501Smrgdrm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
181022944501Smrg{
18119ce4edccSmrg	drm_intel_gem_bo_start_gtt_access(bo, 1);
181222944501Smrg}
181322944501Smrg
181420131375Smrg/**
181520131375Smrg * Waits on a BO for the given amount of time.
181620131375Smrg *
181720131375Smrg * @bo: buffer object to wait for
181820131375Smrg * @timeout_ns: amount of time to wait in nanoseconds.
181920131375Smrg *   If value is less than 0, an infinite wait will occur.
182020131375Smrg *
182120131375Smrg * Returns 0 if the wait was successful, i.e. the last batch referencing the
182220131375Smrg * object has completed within the allotted time. Otherwise some negative return
182320131375Smrg * value describes the error. Of particular interest is -ETIME when the wait has
182420131375Smrg * failed to yield the desired result.
182520131375Smrg *
182620131375Smrg * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
182720131375Smrg * the operation to give up after a certain amount of time. Another subtle
182820131375Smrg * difference is the internal locking semantics are different (this variant does
182920131375Smrg * not hold the lock for the duration of the wait). This makes the wait subject
183020131375Smrg * to a larger userspace race window.
183120131375Smrg *
183220131375Smrg * The implementation shall wait until the object is no longer actively
183320131375Smrg * referenced within a batch buffer at the time of the call. The wait does
183420131375Smrg * not guarantee that the buffer has not been re-issued via another thread or
183520131375Smrg * a flinked handle. Userspace must make sure this race does not occur if
183620131375Smrg * such precision is important.
1837424e9256Smrg *
1838424e9256Smrg * Note that some kernels have broken the promise of an infinite wait for
1839424e9256Smrg * negative values; upgrade to the latest stable kernel if this is the case.
184020131375Smrg */
1841424e9256Smrgint
1842a884aba1Smrgdrm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
184320131375Smrg{
184420131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
184520131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
184620131375Smrg	struct drm_i915_gem_wait wait;
184720131375Smrg	int ret;
184820131375Smrg
184920131375Smrg	if (!bufmgr_gem->has_wait_timeout) {
185020131375Smrg		DBG("%s:%d: Timed wait is not supported. Falling back to "
185120131375Smrg		    "infinite wait\n", __FILE__, __LINE__);
185220131375Smrg		if (timeout_ns) {
185320131375Smrg			drm_intel_gem_bo_wait_rendering(bo);
185420131375Smrg			return 0;
185520131375Smrg		} else {
185620131375Smrg			return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
185720131375Smrg		}
185820131375Smrg	}
185920131375Smrg
1860424e9256Smrg	memclear(wait);
186120131375Smrg	wait.bo_handle = bo_gem->gem_handle;
186220131375Smrg	wait.timeout_ns = timeout_ns;
186320131375Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
186420131375Smrg	if (ret == -1)
186520131375Smrg		return -errno;
186620131375Smrg
186720131375Smrg	return ret;
186820131375Smrg}
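
/*
 * Example: a timed-wait sketch.  Wait up to one millisecond before
 * deciding the buffer is still busy; the budget and defer_reuse() (a
 * hypothetical helper) are assumptions of this sketch.
 *
 *	ret = drm_intel_gem_bo_wait(bo, 1000000);
 *	if (ret == -ETIME)
 *		defer_reuse(bo);
 */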
186920131375Smrg
187022944501Smrg/**
187122944501Smrg * Sets the object to the GTT read and possibly write domain, used by the X
187222944501Smrg * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
187322944501Smrg *
187422944501Smrg * In combination with drm_intel_gem_bo_pin() and manual fence management, we
187522944501Smrg * can do tiled pixmaps this way.
187622944501Smrg */
1877424e9256Smrgvoid
187822944501Smrgdrm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
187922944501Smrg{
188022944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
188122944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
188222944501Smrg	struct drm_i915_gem_set_domain set_domain;
188322944501Smrg	int ret;
188422944501Smrg
1885424e9256Smrg	memclear(set_domain);
188622944501Smrg	set_domain.handle = bo_gem->gem_handle;
188722944501Smrg	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
188822944501Smrg	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
18896d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
18906d98c517Smrg		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
18916d98c517Smrg		       &set_domain);
189222944501Smrg	if (ret != 0) {
18939ce4edccSmrg		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
18949ce4edccSmrg		    __FILE__, __LINE__, bo_gem->gem_handle,
18959ce4edccSmrg		    set_domain.read_domains, set_domain.write_domain,
18969ce4edccSmrg		    strerror(errno));
189722944501Smrg	}
189822944501Smrg}
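
/*
 * Example: the pin-plus-GTT-access pattern described above, as a 2D
 * driver might use it for a tiled scanout pixmap on old kernels that
 * still support the pin ioctl.  The 4096-byte alignment is an assumption.
 *
 *	drm_intel_bo_pin(bo, 4096);
 *	drm_intel_gem_bo_start_gtt_access(bo, 1);
 *	... write through a GTT mapping, managing fences by hand ...
 */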
189922944501Smrg
190022944501Smrgstatic void
190122944501Smrgdrm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
190222944501Smrg{
190322944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1904424e9256Smrg	struct drm_gem_close close_bo;
1905424e9256Smrg	int i, ret;
190622944501Smrg
190722944501Smrg	free(bufmgr_gem->exec2_objects);
190822944501Smrg	free(bufmgr_gem->exec_objects);
190922944501Smrg	free(bufmgr_gem->exec_bos);
191022944501Smrg
191122944501Smrg	pthread_mutex_destroy(&bufmgr_gem->lock);
191222944501Smrg
191322944501Smrg	/* Free any cached buffer objects we were going to reuse */
1914aaba2545Smrg	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
191522944501Smrg		struct drm_intel_gem_bo_bucket *bucket =
191622944501Smrg		    &bufmgr_gem->cache_bucket[i];
191722944501Smrg		drm_intel_bo_gem *bo_gem;
191822944501Smrg
191922944501Smrg		while (!DRMLISTEMPTY(&bucket->head)) {
192022944501Smrg			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
192122944501Smrg					      bucket->head.next, head);
192222944501Smrg			DRMLISTDEL(&bo_gem->head);
192322944501Smrg
192422944501Smrg			drm_intel_gem_bo_free(&bo_gem->bo);
192522944501Smrg		}
192622944501Smrg	}
192722944501Smrg
1928424e9256Smrg	/* Release userptr bo kept hanging around for optimisation. */
1929424e9256Smrg	if (bufmgr_gem->userptr_active.ptr) {
1930424e9256Smrg		memclear(close_bo);
1931424e9256Smrg		close_bo.handle = bufmgr_gem->userptr_active.handle;
1932424e9256Smrg		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1933424e9256Smrg		free(bufmgr_gem->userptr_active.ptr);
1934424e9256Smrg		if (ret)
1935424e9256Smrg			fprintf(stderr,
1936424e9256Smrg				"Failed to release test userptr object! (%d) "
1937424e9256Smrg				"i915 kernel driver may not be sane!\n", errno);
1938424e9256Smrg	}
1939424e9256Smrg
194022944501Smrg	free(bufmgr);
194122944501Smrg}
194222944501Smrg
194322944501Smrg/**
194422944501Smrg * Adds the target buffer to the validation list and adds the relocation
194522944501Smrg * to the reloc_buffer's relocation list.
194622944501Smrg *
194722944501Smrg * The relocation entry at the given offset must already contain the
194822944501Smrg * precomputed relocation value, because the kernel will optimize out
194922944501Smrg * the relocation entry write when the buffer hasn't moved from the
195022944501Smrg * last known offset in target_bo.
195122944501Smrg */
195222944501Smrgstatic int
195322944501Smrgdo_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
195422944501Smrg		 drm_intel_bo *target_bo, uint32_t target_offset,
195522944501Smrg		 uint32_t read_domains, uint32_t write_domain,
195620131375Smrg		 bool need_fence)
195722944501Smrg{
195822944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
195922944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
196022944501Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
196120131375Smrg	bool fenced_command;
196222944501Smrg
196322944501Smrg	if (bo_gem->has_error)
196422944501Smrg		return -ENOMEM;
196522944501Smrg
196622944501Smrg	if (target_bo_gem->has_error) {
196720131375Smrg		bo_gem->has_error = true;
196822944501Smrg		return -ENOMEM;
196922944501Smrg	}
197022944501Smrg
197122944501Smrg	/* We never use HW fences for rendering on 965+ */
197222944501Smrg	if (bufmgr_gem->gen >= 4)
197320131375Smrg		need_fence = false;
197422944501Smrg
19759ce4edccSmrg	fenced_command = need_fence;
19769ce4edccSmrg	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
197720131375Smrg		need_fence = false;
19789ce4edccSmrg
197922944501Smrg	/* Create a new relocation list if needed */
198022944501Smrg	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
198122944501Smrg		return -ENOMEM;
198222944501Smrg
198322944501Smrg	/* Check overflow */
198422944501Smrg	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
198522944501Smrg
198622944501Smrg	/* Check args */
198722944501Smrg	assert(offset <= bo->size - 4);
198822944501Smrg	assert((write_domain & (write_domain - 1)) == 0);
198922944501Smrg
19903c748557Ssnj	/* An object needing a fence is a tiled buffer, so it won't have
19913c748557Ssnj	 * relocs to other buffers.
19923c748557Ssnj	 */
19933c748557Ssnj	if (need_fence) {
19943c748557Ssnj		assert(target_bo_gem->reloc_count == 0);
19953c748557Ssnj		target_bo_gem->reloc_tree_fences = 1;
19963c748557Ssnj	}
19973c748557Ssnj
199822944501Smrg	/* Make sure that we're not adding a reloc to something whose size has
199922944501Smrg	 * already been accounted for.
200022944501Smrg	 */
200122944501Smrg	assert(!bo_gem->used_as_reloc_target);
2002aaba2545Smrg	if (target_bo_gem != bo_gem) {
200320131375Smrg		target_bo_gem->used_as_reloc_target = true;
2004aaba2545Smrg		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
20053c748557Ssnj		bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
2006aaba2545Smrg	}
200722944501Smrg
200822944501Smrg	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
2009aaba2545Smrg	if (target_bo != bo)
2010aaba2545Smrg		drm_intel_gem_bo_reference(target_bo);
20119ce4edccSmrg	if (fenced_command)
201222944501Smrg		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
201322944501Smrg			DRM_INTEL_RELOC_FENCE;
201422944501Smrg	else
201522944501Smrg		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
201622944501Smrg
2017fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
2018fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
2019fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].target_handle =
2020fe517fc9Smrg	    target_bo_gem->gem_handle;
2021fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
2022fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
2023fe517fc9Smrg	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
202422944501Smrg	bo_gem->reloc_count++;
202522944501Smrg
202622944501Smrg	return 0;
202722944501Smrg}
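
/*
 * Example: emitting a relocation from a batch buffer to a render target
 * via the public wrapper (which lands in drm_intel_gem_bo_emit_reloc()
 * below).  "batch_offset", the byte offset of the address within the
 * batch, is an assumption of this sketch.
 *
 *	drm_intel_bo_emit_reloc(batch_bo, batch_offset,
 *				target_bo, 0,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 */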
202822944501Smrg
2029fe517fc9Smrgstatic void
2030fe517fc9Smrgdrm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
2031fe517fc9Smrg{
2032fe517fc9Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
20330655efefSmrg
20340655efefSmrg	if (enable)
20350655efefSmrg		bo_gem->kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
20360655efefSmrg	else
20370655efefSmrg		bo_gem->kflags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
2038fe517fc9Smrg}
2039fe517fc9Smrg
2040fe517fc9Smrgstatic int
2041fe517fc9Smrgdrm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
2042fe517fc9Smrg{
2043fe517fc9Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2044fe517fc9Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2045fe517fc9Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2046fe517fc9Smrg	if (bo_gem->has_error)
2047fe517fc9Smrg		return -ENOMEM;
2048fe517fc9Smrg
2049fe517fc9Smrg	if (target_bo_gem->has_error) {
2050fe517fc9Smrg		bo_gem->has_error = true;
2051fe517fc9Smrg		return -ENOMEM;
2052fe517fc9Smrg	}
2053fe517fc9Smrg
20540655efefSmrg	if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
2055fe517fc9Smrg		return -EINVAL;
2056fe517fc9Smrg	if (target_bo_gem == bo_gem)
2057fe517fc9Smrg		return -EINVAL;
2058fe517fc9Smrg
2059fe517fc9Smrg	if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
2060fe517fc9Smrg		int new_size = bo_gem->softpin_target_size * 2;
2061fe517fc9Smrg		if (new_size == 0)
2062fe517fc9Smrg			new_size = bufmgr_gem->max_relocs;
2063fe517fc9Smrg
2064fe517fc9Smrg		drm_intel_bo **new_targets = realloc(bo_gem->softpin_target,
2065fe517fc9Smrg				new_size * sizeof(drm_intel_bo *));
2066fe517fc9Smrg		if (!new_targets)	/* don't leak the old array */
2067fe517fc9Smrg			return -ENOMEM;
2068fe517fc9Smrg		bo_gem->softpin_target = new_targets;
2069fe517fc9Smrg		bo_gem->softpin_target_size = new_size;
2070fe517fc9Smrg	}
2071fe517fc9Smrg	bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
2072fe517fc9Smrg	drm_intel_gem_bo_reference(target_bo);
2073fe517fc9Smrg	bo_gem->softpin_target_count++;
2074fe517fc9Smrg
2075fe517fc9Smrg	return 0;
2076fe517fc9Smrg}
2077fe517fc9Smrg
207822944501Smrgstatic int
207922944501Smrgdrm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
208022944501Smrg			    drm_intel_bo *target_bo, uint32_t target_offset,
208122944501Smrg			    uint32_t read_domains, uint32_t write_domain)
208222944501Smrg{
208322944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2084fe517fc9Smrg	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
208522944501Smrg
20860655efefSmrg	if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
2087fe517fc9Smrg		return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
2088fe517fc9Smrg	else
2089fe517fc9Smrg		return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2090fe517fc9Smrg					read_domains, write_domain,
2091fe517fc9Smrg					!bufmgr_gem->fenced_relocs);
209222944501Smrg}
209322944501Smrg
209422944501Smrgstatic int
209522944501Smrgdrm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
209622944501Smrg				  drm_intel_bo *target_bo,
209722944501Smrg				  uint32_t target_offset,
209822944501Smrg				  uint32_t read_domains, uint32_t write_domain)
209922944501Smrg{
210022944501Smrg	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
210120131375Smrg				read_domains, write_domain, true);
210220131375Smrg}
210320131375Smrg
2104424e9256Smrgint
210520131375Smrgdrm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
210620131375Smrg{
210720131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
210820131375Smrg
210920131375Smrg	return bo_gem->reloc_count;
211020131375Smrg}
211120131375Smrg
211220131375Smrg/**
211320131375Smrg * Removes existing relocation entries in the BO after "start".
211420131375Smrg *
211520131375Smrg * This allows a user to avoid a two-step process for state setup with
211620131375Smrg * counting up all the buffer objects and doing a
211720131375Smrg * drm_intel_bufmgr_check_aperture_space() before emitting any of the
211820131375Smrg * relocations for the state setup.  Instead, save the state of the
211920131375Smrg * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
212020131375Smrg * state, and then check if it still fits in the aperture.
212120131375Smrg *
212220131375Smrg * Any further drm_intel_bufmgr_check_aperture_space() queries
212320131375Smrg * involving this buffer in the tree are undefined after this call.
2124fe517fc9Smrg *
2125fe517fc9Smrg * This also removes all softpinned targets being referenced by the BO.
212620131375Smrg */
2127424e9256Smrgvoid
212820131375Smrgdrm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
212920131375Smrg{
2130a884aba1Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
213120131375Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
213220131375Smrg	int i;
213320131375Smrg	struct timespec time;
213420131375Smrg
213520131375Smrg	clock_gettime(CLOCK_MONOTONIC, &time);
213620131375Smrg
213720131375Smrg	assert(bo_gem->reloc_count >= start);
2138a884aba1Smrg
213920131375Smrg	/* Unreference the cleared target buffers */
2140a884aba1Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
2141a884aba1Smrg
214220131375Smrg	for (i = start; i < bo_gem->reloc_count; i++) {
214320131375Smrg		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
214420131375Smrg		if (&target_bo_gem->bo != bo) {
214520131375Smrg			bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
214620131375Smrg			drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
214720131375Smrg								  time.tv_sec);
214820131375Smrg		}
214920131375Smrg	}
215020131375Smrg	bo_gem->reloc_count = start;
2151a884aba1Smrg
2152fe517fc9Smrg	for (i = 0; i < bo_gem->softpin_target_count; i++) {
2153fe517fc9Smrg		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
2154fe517fc9Smrg		drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
2155fe517fc9Smrg	}
2156fe517fc9Smrg	bo_gem->softpin_target_count = 0;
2157fe517fc9Smrg
2158a884aba1Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
2159a884aba1Smrg
216022944501Smrg}
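
/*
 * Example: the save/emit/check/roll-back pattern this enables.  The
 * state-emission helper is hypothetical; the libdrm calls are real.
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *	emit_state(batch_bo);	// adds relocations to batch_bo
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0)
 *		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
 */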
216122944501Smrg
216222944501Smrg/**
216322944501Smrg * Walk the tree of relocations rooted at BO and accumulate the list of
216422944501Smrg * validations to be performed and update the relocation buffers with
216522944501Smrg * index values into the validation list.
216622944501Smrg */
216722944501Smrgstatic void
216822944501Smrgdrm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
216922944501Smrg{
217022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
217122944501Smrg	int i;
217222944501Smrg
217322944501Smrg	if (bo_gem->relocs == NULL)
217422944501Smrg		return;
217522944501Smrg
217622944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++) {
217722944501Smrg		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
217822944501Smrg
2179aaba2545Smrg		if (target_bo == bo)
2180aaba2545Smrg			continue;
2181aaba2545Smrg
218220131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
218320131375Smrg
218422944501Smrg		/* Continue walking the tree depth-first. */
218522944501Smrg		drm_intel_gem_bo_process_reloc(target_bo);
218622944501Smrg
218722944501Smrg		/* Add the target to the validate list */
218822944501Smrg		drm_intel_add_validate_buffer(target_bo);
218922944501Smrg	}
219022944501Smrg}
219122944501Smrg
219222944501Smrgstatic void
219322944501Smrgdrm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
219422944501Smrg{
219522944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
219622944501Smrg	int i;
219722944501Smrg
2198fe517fc9Smrg	if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
219922944501Smrg		return;
220022944501Smrg
220122944501Smrg	for (i = 0; i < bo_gem->reloc_count; i++) {
220222944501Smrg		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
220322944501Smrg		int need_fence;
220422944501Smrg
2205aaba2545Smrg		if (target_bo == bo)
2206aaba2545Smrg			continue;
2207aaba2545Smrg
220820131375Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
220920131375Smrg
221022944501Smrg		/* Continue walking the tree depth-first. */
221122944501Smrg		drm_intel_gem_bo_process_reloc2(target_bo);
221222944501Smrg
221322944501Smrg		need_fence = (bo_gem->reloc_target_info[i].flags &
221422944501Smrg			      DRM_INTEL_RELOC_FENCE);
221522944501Smrg
221622944501Smrg		/* Add the target to the validate list */
221722944501Smrg		drm_intel_add_validate_buffer2(target_bo, need_fence);
221822944501Smrg	}
2219fe517fc9Smrg
2220fe517fc9Smrg	for (i = 0; i < bo_gem->softpin_target_count; i++) {
2221fe517fc9Smrg		drm_intel_bo *target_bo = bo_gem->softpin_target[i];
2222fe517fc9Smrg
2223fe517fc9Smrg		if (target_bo == bo)
2224fe517fc9Smrg			continue;
2225fe517fc9Smrg
2226fe517fc9Smrg		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2227fe517fc9Smrg		drm_intel_gem_bo_process_reloc2(target_bo);
2228fe517fc9Smrg		drm_intel_add_validate_buffer2(target_bo, false);
2229fe517fc9Smrg	}
223022944501Smrg}
223122944501Smrg
223222944501Smrg
223322944501Smrgstatic void
223422944501Smrgdrm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
223522944501Smrg{
223622944501Smrg	int i;
223722944501Smrg
223822944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
223922944501Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
224022944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
224122944501Smrg
224222944501Smrg		/* Update the buffer offset */
224320131375Smrg		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
2244fe517fc9Smrg			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2245d82d45b3Sjoerg			    bo_gem->gem_handle, bo_gem->name,
2246fe517fc9Smrg			    upper_32_bits(bo->offset64),
2247fe517fc9Smrg			    lower_32_bits(bo->offset64),
2248fe517fc9Smrg			    upper_32_bits(bufmgr_gem->exec_objects[i].offset),
2249fe517fc9Smrg			    lower_32_bits(bufmgr_gem->exec_objects[i].offset));
225020131375Smrg			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
225122944501Smrg			bo->offset = bufmgr_gem->exec_objects[i].offset;
225222944501Smrg		}
225322944501Smrg	}
225422944501Smrg}
225522944501Smrg
225622944501Smrgstatic void
225722944501Smrgdrm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
225822944501Smrg{
225922944501Smrg	int i;
226022944501Smrg
226122944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
226222944501Smrg		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
226322944501Smrg		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
226422944501Smrg
226522944501Smrg		/* Update the buffer offset */
226620131375Smrg		if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2267fe517fc9Smrg			/* If we're seeing a softpinned object here, the kernel has
2268fe517fc9Smrg			 * relocated our object, indicating a programming error.
2269fe517fc9Smrg			 */
22700655efefSmrg			assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
2271fe517fc9Smrg			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2272d82d45b3Sjoerg			    bo_gem->gem_handle, bo_gem->name,
2273fe517fc9Smrg			    upper_32_bits(bo->offset64),
2274fe517fc9Smrg			    lower_32_bits(bo->offset64),
2275fe517fc9Smrg			    upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
2276fe517fc9Smrg			    lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
227720131375Smrg			bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
227822944501Smrg			bo->offset = bufmgr_gem->exec2_objects[i].offset;
227922944501Smrg		}
228022944501Smrg	}
228122944501Smrg}
228222944501Smrg
2283424e9256Smrgvoid
228420131375Smrgdrm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
228520131375Smrg			      int x1, int y1, int width, int height,
228620131375Smrg			      enum aub_dump_bmp_format format,
228720131375Smrg			      int pitch, int offset)
228820131375Smrg{
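	/* Intentionally a no-op: AUB dump support was removed from libdrm,
	 * and this stub appears to remain only for ABI compatibility
	 * (inferred from the empty body). */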
228920131375Smrg}
229020131375Smrg
229120131375Smrgstatic int
229220131375Smrgdrm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
229320131375Smrg		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
229420131375Smrg{
229520131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
229620131375Smrg	struct drm_i915_gem_execbuffer execbuf;
229720131375Smrg	int ret, i;
229820131375Smrg
2299fe517fc9Smrg	if (to_bo_gem(bo)->has_error)
230020131375Smrg		return -ENOMEM;
230120131375Smrg
230220131375Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
230320131375Smrg	/* Update indices and set up the validate list. */
230420131375Smrg	drm_intel_gem_bo_process_reloc(bo);
230520131375Smrg
230620131375Smrg	/* Add the batch buffer to the validation list.  There are no
230720131375Smrg	 * relocations pointing to it.
230820131375Smrg	 */
230920131375Smrg	drm_intel_add_validate_buffer(bo);
231020131375Smrg
2311424e9256Smrg	memclear(execbuf);
231220131375Smrg	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
231320131375Smrg	execbuf.buffer_count = bufmgr_gem->exec_count;
231420131375Smrg	execbuf.batch_start_offset = 0;
231520131375Smrg	execbuf.batch_len = used;
231620131375Smrg	execbuf.cliprects_ptr = (uintptr_t) cliprects;
231720131375Smrg	execbuf.num_cliprects = num_cliprects;
231820131375Smrg	execbuf.DR1 = 0;
231920131375Smrg	execbuf.DR4 = DR4;
232020131375Smrg
232120131375Smrg	ret = drmIoctl(bufmgr_gem->fd,
232220131375Smrg		       DRM_IOCTL_I915_GEM_EXECBUFFER,
232320131375Smrg		       &execbuf);
232420131375Smrg	if (ret != 0) {
232520131375Smrg		ret = -errno;
232620131375Smrg		if (errno == ENOSPC) {
232720131375Smrg			DBG("Execbuffer fails to pin. "
232820131375Smrg			    "Estimate: %u. Actual: %u. Available: %u\n",
232920131375Smrg			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
233020131375Smrg							       bufmgr_gem->
233120131375Smrg							       exec_count),
233220131375Smrg			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
233320131375Smrg							      bufmgr_gem->
233420131375Smrg							      exec_count),
233520131375Smrg			    (unsigned int)bufmgr_gem->gtt_size);
233620131375Smrg		}
233720131375Smrg	}
233820131375Smrg	drm_intel_update_buffer_offsets(bufmgr_gem);
233920131375Smrg
234020131375Smrg	if (bufmgr_gem->bufmgr.debug)
234120131375Smrg		drm_intel_gem_dump_validation_list(bufmgr_gem);
234220131375Smrg
234320131375Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
2344fe517fc9Smrg		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
234520131375Smrg
234620131375Smrg		bo_gem->idle = false;
234720131375Smrg
234820131375Smrg		/* Disconnect the buffer from the validate list */
234920131375Smrg		bo_gem->validate_index = -1;
235020131375Smrg		bufmgr_gem->exec_bos[i] = NULL;
235120131375Smrg	}
235220131375Smrg	bufmgr_gem->exec_count = 0;
235320131375Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
235420131375Smrg
235520131375Smrg	return ret;
235620131375Smrg}
235720131375Smrg
235820131375Smrgstatic int
235920131375Smrgdo_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
236020131375Smrg	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
23612ee35494Smrg	 int in_fence, int *out_fence,
236220131375Smrg	 unsigned int flags)
236320131375Smrg{
236420131375Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
236520131375Smrg	struct drm_i915_gem_execbuffer2 execbuf;
236620131375Smrg	int ret = 0;
236720131375Smrg	int i;
236820131375Smrg
2369fe517fc9Smrg	if (to_bo_gem(bo)->has_error)
2370fe517fc9Smrg		return -ENOMEM;
2371fe517fc9Smrg
237220131375Smrg	switch (flags & 0x7) {
237320131375Smrg	default:
237420131375Smrg		return -EINVAL;
237520131375Smrg	case I915_EXEC_BLT:
23769ce4edccSmrg		if (!bufmgr_gem->has_blt)
23779ce4edccSmrg			return -EINVAL;
23789ce4edccSmrg		break;
23799ce4edccSmrg	case I915_EXEC_BSD:
23809ce4edccSmrg		if (!bufmgr_gem->has_bsd)
23819ce4edccSmrg			return -EINVAL;
23829ce4edccSmrg		break;
238320131375Smrg	case I915_EXEC_VEBOX:
238420131375Smrg		if (!bufmgr_gem->has_vebox)
238520131375Smrg			return -EINVAL;
238620131375Smrg		break;
23879ce4edccSmrg	case I915_EXEC_RENDER:
23889ce4edccSmrg	case I915_EXEC_DEFAULT:
23899ce4edccSmrg		break;
23909ce4edccSmrg	}
2391aaba2545Smrg
239222944501Smrg	pthread_mutex_lock(&bufmgr_gem->lock);
239322944501Smrg	/* Update indices and set up the validate list. */
239422944501Smrg	drm_intel_gem_bo_process_reloc2(bo);
239522944501Smrg
239622944501Smrg	/* Add the batch buffer to the validation list.  There are no relocations
239722944501Smrg	 * pointing to it.
239822944501Smrg	 */
239922944501Smrg	drm_intel_add_validate_buffer2(bo, 0);
240022944501Smrg
2401424e9256Smrg	memclear(execbuf);
240222944501Smrg	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
240322944501Smrg	execbuf.buffer_count = bufmgr_gem->exec_count;
240422944501Smrg	execbuf.batch_start_offset = 0;
240522944501Smrg	execbuf.batch_len = used;
240622944501Smrg	execbuf.cliprects_ptr = (uintptr_t)cliprects;
240722944501Smrg	execbuf.num_cliprects = num_cliprects;
240822944501Smrg	execbuf.DR1 = 0;
240922944501Smrg	execbuf.DR4 = DR4;
241020131375Smrg	execbuf.flags = flags;
241120131375Smrg	if (ctx == NULL)
241220131375Smrg		i915_execbuffer2_set_context_id(execbuf, 0);
241320131375Smrg	else
241420131375Smrg		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
241522944501Smrg	execbuf.rsvd2 = 0;
24162ee35494Smrg	if (in_fence != -1) {
24172ee35494Smrg		execbuf.rsvd2 = in_fence;
24182ee35494Smrg		execbuf.flags |= I915_EXEC_FENCE_IN;
24192ee35494Smrg	}
24202ee35494Smrg	if (out_fence != NULL) {
24212ee35494Smrg		*out_fence = -1;
24222ee35494Smrg		execbuf.flags |= I915_EXEC_FENCE_OUT;
24232ee35494Smrg	}
242422944501Smrg
242520131375Smrg	if (bufmgr_gem->no_exec)
242620131375Smrg		goto skip_execution;
242720131375Smrg
24286d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
24292ee35494Smrg		       DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
24306d98c517Smrg		       &execbuf);
243122944501Smrg	if (ret != 0) {
243222944501Smrg		ret = -errno;
24336d98c517Smrg		if (ret == -ENOSPC) {
24349ce4edccSmrg			DBG("Execbuffer fails to pin. "
24359ce4edccSmrg			    "Estimate: %u. Actual: %u. Available: %u\n",
24369ce4edccSmrg			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
24379ce4edccSmrg							       bufmgr_gem->exec_count),
24389ce4edccSmrg			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
24399ce4edccSmrg							      bufmgr_gem->exec_count),
24409ce4edccSmrg			    (unsigned int) bufmgr_gem->gtt_size);
244122944501Smrg		}
244222944501Smrg	}
244322944501Smrg	drm_intel_update_buffer_offsets2(bufmgr_gem);
244422944501Smrg
24452ee35494Smrg	if (ret == 0 && out_fence != NULL)
24462ee35494Smrg		*out_fence = execbuf.rsvd2 >> 32;
24472ee35494Smrg
244820131375Smrgskip_execution:
244922944501Smrg	if (bufmgr_gem->bufmgr.debug)
245022944501Smrg		drm_intel_gem_dump_validation_list(bufmgr_gem);
245122944501Smrg
245222944501Smrg	for (i = 0; i < bufmgr_gem->exec_count; i++) {
2453fe517fc9Smrg		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
245422944501Smrg
245520131375Smrg		bo_gem->idle = false;
245620131375Smrg
245722944501Smrg		/* Disconnect the buffer from the validate list */
245822944501Smrg		bo_gem->validate_index = -1;
245922944501Smrg		bufmgr_gem->exec_bos[i] = NULL;
246022944501Smrg	}
246122944501Smrg	bufmgr_gem->exec_count = 0;
246222944501Smrg	pthread_mutex_unlock(&bufmgr_gem->lock);
246322944501Smrg
246422944501Smrg	return ret;
246522944501Smrg}
246622944501Smrg
2467aaba2545Smrgstatic int
2468aaba2545Smrgdrm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2469aaba2545Smrg		       drm_clip_rect_t *cliprects, int num_cliprects,
2470aaba2545Smrg		       int DR4)
2471aaba2545Smrg{
247220131375Smrg	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
24732ee35494Smrg			-1, NULL, I915_EXEC_RENDER);
247420131375Smrg}
247520131375Smrg
247620131375Smrgstatic int
247720131375Smrgdrm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
247820131375Smrg			drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
247920131375Smrg			unsigned int flags)
248020131375Smrg{
248120131375Smrg	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
24822ee35494Smrg			-1, NULL, flags);
248320131375Smrg}
248420131375Smrg
2485424e9256Smrgint
248620131375Smrgdrm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
248720131375Smrg			      int used, unsigned int flags)
248820131375Smrg{
24892ee35494Smrg	return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
24902ee35494Smrg}
24912ee35494Smrg
24922ee35494Smrgint
24932ee35494Smrgdrm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
24942ee35494Smrg			    drm_intel_context *ctx,
24952ee35494Smrg			    int used,
24962ee35494Smrg			    int in_fence,
24972ee35494Smrg			    int *out_fence,
24982ee35494Smrg			    unsigned int flags)
24992ee35494Smrg{
25002ee35494Smrg	return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
2501aaba2545Smrg}
2502aaba2545Smrg
250322944501Smrgstatic int
250422944501Smrgdrm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
250522944501Smrg{
250622944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
250722944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
250822944501Smrg	struct drm_i915_gem_pin pin;
250922944501Smrg	int ret;
251022944501Smrg
2511424e9256Smrg	memclear(pin);
251222944501Smrg	pin.handle = bo_gem->gem_handle;
251322944501Smrg	pin.alignment = alignment;
251422944501Smrg
25156d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd,
25166d98c517Smrg		       DRM_IOCTL_I915_GEM_PIN,
25176d98c517Smrg		       &pin);
251822944501Smrg	if (ret != 0)
251922944501Smrg		return -errno;
252022944501Smrg
252120131375Smrg	bo->offset64 = pin.offset;
252222944501Smrg	bo->offset = pin.offset;
252322944501Smrg	return 0;
252422944501Smrg}
252522944501Smrg
252622944501Smrgstatic int
252722944501Smrgdrm_intel_gem_bo_unpin(drm_intel_bo *bo)
252822944501Smrg{
252922944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
253022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
253122944501Smrg	struct drm_i915_gem_unpin unpin;
253222944501Smrg	int ret;
253322944501Smrg
2534424e9256Smrg	memclear(unpin);
253522944501Smrg	unpin.handle = bo_gem->gem_handle;
253622944501Smrg
25376d98c517Smrg	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
253822944501Smrg	if (ret != 0)
253922944501Smrg		return -errno;
254022944501Smrg
254122944501Smrg	return 0;
254222944501Smrg}
254322944501Smrg
254422944501Smrgstatic int
25456d98c517Smrgdrm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
25466d98c517Smrg				     uint32_t tiling_mode,
25476d98c517Smrg				     uint32_t stride)
254822944501Smrg{
254922944501Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
255022944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
255122944501Smrg	struct drm_i915_gem_set_tiling set_tiling;
255222944501Smrg	int ret;
255322944501Smrg
25546d98c517Smrg	if (bo_gem->global_name == 0 &&
25556d98c517Smrg	    tiling_mode == bo_gem->tiling_mode &&
25566d98c517Smrg	    stride == bo_gem->stride)
255722944501Smrg		return 0;
255822944501Smrg
255922944501Smrg	memset(&set_tiling, 0, sizeof(set_tiling));
256022944501Smrg	do {
25616d98c517Smrg		/* set_tiling is slightly broken and overwrites the
25626d98c517Smrg		 * input on the error path, so we have to open code
25636d98c517Smrg		 * drmIoctl.
25646d98c517Smrg		 */
25656d98c517Smrg		set_tiling.handle = bo_gem->gem_handle;
25666d98c517Smrg		set_tiling.tiling_mode = tiling_mode;
256722944501Smrg		set_tiling.stride = stride;
256822944501Smrg
256922944501Smrg		ret = ioctl(bufmgr_gem->fd,
257022944501Smrg			    DRM_IOCTL_I915_GEM_SET_TILING,
257122944501Smrg			    &set_tiling);
25726d98c517Smrg	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
25736d98c517Smrg	if (ret == -1)
25746d98c517Smrg		return -errno;
25756d98c517Smrg
25766d98c517Smrg	bo_gem->tiling_mode = set_tiling.tiling_mode;
25776d98c517Smrg	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
25786d98c517Smrg	bo_gem->stride = set_tiling.stride;
25796d98c517Smrg	return 0;
25806d98c517Smrg}
25816d98c517Smrg
25826d98c517Smrgstatic int
25836d98c517Smrgdrm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
25846d98c517Smrg			    uint32_t stride)
25856d98c517Smrg{
25866d98c517Smrg	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
25876d98c517Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
25886d98c517Smrg	int ret;
25896d98c517Smrg
2590a884aba1Smrg	/* Tiling with userptr surfaces is not supported
2591a884aba1Smrg	 * on all hardware, so refuse it for the time being.
2592a884aba1Smrg	 */
2593a884aba1Smrg	if (bo_gem->is_userptr)
2594a884aba1Smrg		return -EINVAL;
2595a884aba1Smrg
25966d98c517Smrg	/* Linear buffers have no stride. By ensuring that we only ever use
25976d98c517Smrg	 * stride 0 with linear buffers, we simplify our code.
25986d98c517Smrg	 */
25996d98c517Smrg	if (*tiling_mode == I915_TILING_NONE)
26006d98c517Smrg		stride = 0;
26016d98c517Smrg
26026d98c517Smrg	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
26036d98c517Smrg	if (ret == 0)
2604fe517fc9Smrg		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
260522944501Smrg
260622944501Smrg	*tiling_mode = bo_gem->tiling_mode;
2607aaba2545Smrg	return ret;
260822944501Smrg}
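
/*
 * Example: requesting X tiling through the public wrapper and checking
 * what the kernel actually granted, since *tiling_mode is updated in
 * place.  The 512-byte stride is an assumption.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	if (drm_intel_bo_set_tiling(bo, &tiling, 512) == 0 &&
 *	    tiling == I915_TILING_X)
 *		... the buffer is X-tiled ...
 */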
260922944501Smrg
261022944501Smrgstatic int
261122944501Smrgdrm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
261222944501Smrg			    uint32_t * swizzle_mode)
261322944501Smrg{
261422944501Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
261522944501Smrg
261622944501Smrg	*tiling_mode = bo_gem->tiling_mode;
261722944501Smrg	*swizzle_mode = bo_gem->swizzle_mode;
261822944501Smrg	return 0;
261922944501Smrg}
262022944501Smrg
2621fe517fc9Smrgstatic int
2622fe517fc9Smrgdrm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
2623fe517fc9Smrg{
2624fe517fc9Smrg	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2625fe517fc9Smrg
2626fe517fc9Smrg	bo->offset64 = offset;
2627fe517fc9Smrg	bo->offset = offset;
26280655efefSmrg	bo_gem->kflags |= EXEC_OBJECT_PINNED;
26290655efefSmrg
2630fe517fc9Smrg	return 0;
2631fe517fc9Smrg}
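
/*
 * Example: pinning a BO at a fixed GPU address via the public wrappers,
 * paired with the 48-bit opt-in above.  The address is an assumption and
 * must lie in a VMA range the caller manages itself.
 *
 *	drm_intel_bo_set_softpin_offset(bo, 0x100000000ull);
 *	drm_intel_bo_use_48b_address_range(bo, 1);
 */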

drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int ret;
	uint32_t handle;
	drm_intel_bo_gem *bo_gem;
	struct drm_i915_gem_get_tiling get_tiling;

	pthread_mutex_lock(&bufmgr_gem->lock);
	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
	if (ret) {
		DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return NULL;
	}

	/*
	 * See if the kernel has already returned this buffer to us. Just as
	 * for named buffers, we must not create two bo's pointing at the same
	 * kernel object.
	 */
	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
		  &handle, sizeof(handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		goto out;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	/* Determine size of bo.  The fd-to-handle ioctl really should
	 * return the size, but it doesn't.  If we have kernel 3.12 or
	 * later, we can lseek on the prime fd to get the size.  Older
	 * kernels will just fail, in which case we fall back to the
	 * provided (estimated or guessed) size. */
	ret = lseek(prime_fd, 0, SEEK_END);
	if (ret != -1)
		bo_gem->bo.size = ret;
	else
		bo_gem->bo.size = size;

	bo_gem->bo.handle = handle;
	bo_gem->bo.bufmgr = bufmgr;

	bo_gem->gem_handle = handle;
	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);

	bo_gem->name = "prime";
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	if (drmIoctl(bufmgr_gem->fd,
		     DRM_IOCTL_I915_GEM_GET_TILING,
		     &get_tiling))
		goto err;

	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

out:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return &bo_gem->bo;

err:
	drm_intel_gem_bo_free(&bo_gem->bo);
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}

int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
			       DRM_CLOEXEC, prime_fd) != 0)
		return -errno;

	bo_gem->reusable = false;

	return 0;
}
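
/*
 * Illustrative round-trip sketch (not part of the original file): export a
 * bo as a dma-buf fd and import it into another bufmgr instance.  Error
 * handling and the fd transport (e.g. a unix socket) are elided.
 *
 *	int fd;
 *	if (drm_intel_bo_gem_export_to_prime(bo, &fd) == 0) {
 *		drm_intel_bo *shared =
 *			drm_intel_bo_gem_create_from_prime(bufmgr2, fd,
 *							   bo->size);
 *		// shared now references the same kernel object as bo
 *	}
 */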

static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (!bo_gem->global_name) {
		struct drm_gem_flink flink;

		memclear(flink);
		flink.handle = bo_gem->gem_handle;
		if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
			return -errno;

		pthread_mutex_lock(&bufmgr_gem->lock);
		if (!bo_gem->global_name) {
			bo_gem->global_name = flink.name;
			bo_gem->reusable = false;

			HASH_ADD(name_hh, bufmgr_gem->name_table,
				 global_name, sizeof(bo_gem->global_name),
				 bo_gem);
		}
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}

	*name = bo_gem->global_name;
	return 0;
}

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = true;
}
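
/*
 * Illustrative sketch (not part of the original file): reuse is opt-in and
 * is typically enabled right after creating the buffer manager, before any
 * allocations, so that freed buffers can be recycled from the size buckets
 * set up by init_cache_buckets() below.
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */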

/**
 * Disables implicit synchronisation before executing the bo
 *
 * This will cause rendering corruption unless you correctly manage explicit
 * fences for all rendering involving this buffer - including use by others.
 * Disabling the implicit serialisation is only required if that serialisation
 * is too coarse (for example, you have split the buffer into many
 * non-overlapping regions and are sharing the whole buffer between concurrent
 * independent command streams).
 *
 * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
 * which can be checked using drm_intel_bufmgr_gem_can_disable_implicit_sync,
 * or subsequent execbufs involving the bo will generate EINVAL.
 */
void
drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->kflags |= EXEC_OBJECT_ASYNC;
}

/**
 * Enables implicit synchronisation before executing the bo
 *
 * This is the default behaviour of the kernel, to wait upon prior writes
 * completing on the object before rendering with it, or to wait for prior
 * reads to complete before writing into the object.
 * drm_intel_gem_bo_disable_implicit_sync() can stop this behaviour, telling
 * the kernel never to insert a stall before using the object. Then this
 * function can be used to restore the implicit sync before subsequent
 * rendering.
 */
void
drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
}

/**
 * Query whether the kernel supports disabling of its implicit synchronisation
 * before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
 */
int
drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	return bufmgr_gem->has_exec_async;
}
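
/*
 * Illustrative sketch (not part of the original file): check for kernel
 * support before opting a bo out of implicit fencing; explicit fences for
 * every user of the bo then become the caller's responsibility.
 *
 *	if (drm_intel_bufmgr_gem_can_disable_implicit_sync(bufmgr))
 *		drm_intel_gem_bo_disable_implicit_sync(bo);
 *	// ... render with explicit fencing ...
 *	drm_intel_gem_bo_enable_implicit_sync(bo);
 */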

/**
 * Enable use of fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation.  If this option is not enabled, all relocs will have a fence
 * register allocated.
 */
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
		bufmgr_gem->fenced_relocs = true;
}

/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = true;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total +=
		    drm_intel_gem_bo_get_aperture_space(bo_gem->
							reloc_target_info[i].bo);

	return total;
}

/**
 * Count the number of buffers in this list that need a fence reg
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = false;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_info[i].bo);
}

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet).  We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers.  Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}

/**
 * Return -ENOSPC if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -ENOSPC;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}
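
/*
 * Illustrative sketch (not part of the original file): a caller building a
 * batch checks aperture space before adding another draw and flushes when
 * the check fails.  batch_bo and new_bo are hypothetical.
 *
 *	drm_intel_bo *check[] = { batch_bo, new_bo };
 *	if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0) {
 *		// flush the current batch, then retry against a fresh one
 *	}
 */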

/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = false;
	return 0;
}

static int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reusable;
}

static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return 1;
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
						target_bo))
			return 1;
	}

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		if (bo_gem->softpin_target[i] == target_bo)
			return 1;
		if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
			return 1;
	}

	return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);
	return 0;
}

static void
add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
{
	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;
}

static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}

void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static int
parse_devid_override(const char *devid_override)
{
	static const struct {
		const char *name;
		int pci_id;
	} name_map[] = {
		{ "brw", PCI_CHIP_I965_GM },
		{ "g4x", PCI_CHIP_GM45_GM },
		{ "ilk", PCI_CHIP_ILD_G },
		{ "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
		{ "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
		{ "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
		{ "byt", PCI_CHIP_VALLEYVIEW_3 },
		{ "bdw", 0x1620 | BDW_ULX },
		{ "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
		{ "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(name_map); i++) {
		if (!strcmp(name_map[i].name, devid_override))
			return name_map[i].pci_id;
	}

	/* Not a known codename: accept a raw decimal or 0x-prefixed hex ID. */
	return strtol(devid_override, NULL, 0);
}
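
/*
 * Illustrative sketch (not part of the original file): the override is
 * driven by the INTEL_DEVID_OVERRIDE environment variable, e.g. from a
 * shell:
 *
 *	INTEL_DEVID_OVERRIDE=skl ./some_test      # by codename
 *	INTEL_DEVID_OVERRIDE=0x1912 ./some_test   # by raw PCI ID
 *
 * get_pci_device_id() below also sets no_exec for overridden processes, so
 * they must not actually submit batches.
 */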

/**
 * Get the PCI ID for the device.  This can be overridden by setting the
 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
 */
static int
get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
{
	char *devid_override;
	int devid = 0;
	int ret;
	drm_i915_getparam_t gp;

	if (geteuid() == getuid()) {
		devid_override = getenv("INTEL_DEVID_OVERRIDE");
		if (devid_override) {
			bufmgr_gem->no_exec = true;
			return parse_devid_override(devid_override);
		}
	}

	memclear(gp);
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}
	return devid;
}

int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	return bufmgr_gem->pci_device;
}

/**
 * Sets the AUB filename.
 *
 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
 * for it to have any effect.  AUB dumping has been removed from libdrm (see
 * drm_intel_bufmgr_gem_set_aub_dump() below), so this is now a no-op.
 */
void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
				      const char *filename)
{
}

/**
 * Sets up AUB dumping.
 *
 * This is a trace file format that can be used with the simulator.
 * Packets are emitted in a format somewhat like GPU command packets.
 * You can set up a GTT and upload your objects into the referenced
 * space, then send off batchbuffers and get BMPs out the other end.
 */
void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
	fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
		"Use intel_aubdump from intel-gpu-tools instead.  Install intel-gpu-tools,\n"
		"then run (for example)\n\n"
		"\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
		"See the intel_aubdump man page for more details.\n");
}

drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	struct drm_i915_gem_context_create create;
	drm_intel_context *context = NULL;
	int ret;

	context = calloc(1, sizeof(*context));
	if (!context)
		return NULL;

	memclear(create);
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
	if (ret != 0) {
		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
		    strerror(errno));
		free(context);
		return NULL;
	}

	context->ctx_id = create.ctx_id;
	context->bufmgr = bufmgr;

	return context;
}

int
drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
{
	if (ctx == NULL)
		return -EINVAL;

	*ctx_id = ctx->ctx_id;

	return 0;
}

void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_context_destroy destroy;
	int ret;

	if (ctx == NULL)
		return;

	memclear(destroy);

	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
	destroy.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
		       &destroy);
	if (ret != 0)
		fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
			strerror(errno));

	free(ctx);
}
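
/*
 * Illustrative lifecycle sketch (not part of the original file): hardware
 * contexts isolate GPU state between clients.  A typical user creates one
 * context, submits batches against it with drm_intel_gem_bo_context_exec(),
 * and destroys it on teardown.  batch_bo and used_bytes are hypothetical.
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *	if (ctx) {
 *		drm_intel_gem_bo_context_exec(batch_bo, ctx, used_bytes, 0);
 *		drm_intel_gem_context_destroy(ctx);
 *	}
 */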

int
drm_intel_get_reset_stats(drm_intel_context *ctx,
			  uint32_t *reset_count,
			  uint32_t *active,
			  uint32_t *pending)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_reset_stats stats;
	int ret;

	if (ctx == NULL)
		return -EINVAL;

	memclear(stats);

	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
	stats.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_RESET_STATS,
		       &stats);
	if (ret == 0) {
		if (reset_count != NULL)
			*reset_count = stats.reset_count;

		if (active != NULL)
			*active = stats.batch_active;

		if (pending != NULL)
			*pending = stats.batch_pending;
	}

	return ret;
}

int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
		   uint32_t offset,
		   uint64_t *result)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	struct drm_i915_reg_read reg_read;
	int ret;

	memclear(reg_read);
	reg_read.offset = offset;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

	*result = reg_read.val;
	return ret;
}

int
drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
{
	drm_i915_getparam_t gp;
	int ret;

	memclear(gp);
	gp.value = (int*)subslice_total;
	gp.param = I915_PARAM_SUBSLICE_TOTAL;
	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret)
		return -errno;

	return 0;
}

int
drm_intel_get_eu_total(int fd, unsigned int *eu_total)
{
	drm_i915_getparam_t gp;
	int ret;

	memclear(gp);
	gp.value = (int*)eu_total;
	gp.param = I915_PARAM_EU_TOTAL;
	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret)
		return -errno;

	return 0;
}

int
drm_intel_get_pooled_eu(int fd)
{
	drm_i915_getparam_t gp;
	int ret = -1;

	memclear(gp);
	gp.param = I915_PARAM_HAS_POOLED_EU;
	gp.value = &ret;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -errno;

	return ret;
}

int
drm_intel_get_min_eu_in_pool(int fd)
{
	drm_i915_getparam_t gp;
	int ret = -1;

	memclear(gp);
	gp.param = I915_PARAM_MIN_EU_IN_POOL;
	gp.value = &ret;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -errno;

	return ret;
}
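
/*
 * Illustrative sketch (not part of the original file): querying the GPU
 * topology, e.g. for compute work partitioning.  fd is an open DRM device
 * fd; older kernels may not implement these parameters, hence the checks.
 *
 *	unsigned int subslices, eus;
 *	if (drm_intel_get_subslice_total(fd, &subslices) == 0 &&
 *	    drm_intel_get_eu_total(fd, &eus) == 0)
 *		printf("%u subslices, %u EUs\n", subslices, eus);
 */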

/**
 * Annotate the given bo for use in aub dumping.
 *
 * \param annotations is an array of drm_intel_aub_annotation objects
 * describing the type of data in various sections of the bo.  Each
 * element of the array specifies the type and subtype of a section of
 * the bo, and the past-the-end offset of that section.  The elements
 * of \c annotations must be sorted so that ending_offset is
 * increasing.
 *
 * \param count is the number of elements in the \c annotations array.
 * If \c count is zero, then \c annotations will not be dereferenced.
 *
 * Annotations are copied into a private data structure, so caller may
 * re-use the memory pointed to by \c annotations after the call
 * returns.
 *
 * Annotations are stored for the lifetime of the bo; to reset to the
 * default state (no annotations), call this function with a \c count
 * of zero.
 *
 * As with the other AUB entry points above, AUB dumping has been removed
 * from libdrm, so this function is now a no-op.
 */
void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
					 drm_intel_aub_annotation *annotations,
					 unsigned count)
{
}

static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };

static drm_intel_bufmgr_gem *
drm_intel_bufmgr_gem_find(int fd)
{
	drm_intel_bufmgr_gem *bufmgr_gem;

	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
		if (bufmgr_gem->fd == fd) {
			atomic_inc(&bufmgr_gem->refcount);
			return bufmgr_gem;
		}
	}

	return NULL;
}

static void
drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
		pthread_mutex_lock(&bufmgr_list_mutex);

		if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
			DRMLISTDEL(&bufmgr_gem->managers);
			drm_intel_bufmgr_gem_destroy(bufmgr);
		}

		pthread_mutex_unlock(&bufmgr_list_mutex);
	}
}

void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->gtt_virtual)
		return bo_gem->gtt_virtual;

	if (bo_gem->is_userptr)
		return NULL;

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;
		void *ptr;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		if (bo_gem->map_count++ == 0)
			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ptr = MAP_FAILED;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP_GTT,
			     &mmap_arg) == 0) {
			/* and mmap it */
			ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				       MAP_SHARED, bufmgr_gem->fd,
				       mmap_arg.offset);
		}
		if (ptr == MAP_FAILED) {
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			ptr = NULL;
		}

		bo_gem->gtt_virtual = ptr;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->gtt_virtual;
}

void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		return bo_gem->mem_virtual;

	if (bo_gem->is_userptr) {
		/* Return the same user ptr */
		return bo_gem->user_virtual;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		if (bo_gem->map_count++ == 0)
			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP,
			     &mmap_arg)) {
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		} else {
			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
			bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->mem_virtual;
}

void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->wc_virtual)
		return bo_gem->wc_virtual;

	if (bo_gem->is_userptr)
		return NULL;

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (!bo_gem->wc_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		if (bo_gem->map_count++ == 0)
			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		mmap_arg.flags = I915_MMAP_WC;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP,
			     &mmap_arg)) {
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		} else {
			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
			bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->wc_virtual;
}
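
/*
 * Illustrative sketch (not part of the original file): the three map
 * variants trade coherency for speed.  A common pattern is a cached CPU
 * mapping on LLC platforms and a write-combining mapping elsewhere; since
 * bufmgr internals such as has_llc are not visible to callers, the is_llc
 * flag below is hypothetical.
 *
 *	void *ptr = is_llc ? drm_intel_gem_bo_map__cpu(bo)
 *			   : drm_intel_gem_bo_map__wc(bo);
 *	if (ptr)
 *		memcpy(ptr, data, size);
 */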

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;
	bool exec2 = false;

	pthread_mutex_lock(&bufmgr_list_mutex);

	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
	if (bufmgr_gem)
		goto exit;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		goto exit;

	bufmgr_gem->fd = fd;
	atomic_set(&bufmgr_gem->refcount, 1);

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	memclear(aperture);
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

	if (IS_GEN2(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else if (IS_GEN8(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 8;
	else if (IS_GEN9(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 9;
	else if (IS_GEN10(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 10;
	else {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	if (IS_GEN3(bufmgr_gem->pci_device) &&
	    bufmgr_gem->gtt_size > 256*1024*1024) {
		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
		 * be used for tiled blits. To simplify the accounting, just
		 * subtract the unmappable part (fixed to 256MB on all known
		 * gen3 devices) if the kernel advertises it. */
		bufmgr_gem->gtt_size -= 256*1024*1024;
	}

	memclear(gp);
	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = true;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	gp.param = I915_PARAM_HAS_EXEC_ASYNC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_exec_async = ret == 0;

	bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;

	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_wait_timeout = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret != 0) {
		/* Kernel does not support the HAS_LLC query; fall back to
		 * GPU generation detection and assume that we have LLC on
		 * GEN6/7.
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
				IS_GEN7(bufmgr_gem->pci_device));
	} else
		bufmgr_gem->has_llc = *gp.value;

	gp.param = I915_PARAM_HAS_VEBOX;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);

	gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret == 0 && *gp.value > 0)
		bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;

	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers. Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}

	if (bufmgr_gem->gen >= 8) {
		gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret == 0 && *gp.value == 3)
			bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
	}

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new one if available */
	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
	} else
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	init_cache_buckets(bufmgr_gem);

	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);

exit:
	pthread_mutex_unlock(&bufmgr_list_mutex);

	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
}
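
/*
 * Illustrative setup sketch (not part of the original file): typical use of
 * this file from a client.  The 4096-byte batch size bounds max_relocs as
 * computed above, and repeated init calls on the same fd return the same
 * refcounted manager.  The device path is hypothetical.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 65536, 4096);
 *	...
 *	drm_intel_bo_unreference(bo);
 *	drm_intel_bufmgr_destroy(bufmgr);
 */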